├── .gitignore ├── README.md ├── classification ├── .gitignore ├── flops_counter.py ├── imagenet.py ├── imagenet_fast.py ├── imagenet_mobile.py ├── imagenet_mobile.py.bak ├── models │ ├── imagenet │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── common_head.py │ │ ├── resnet_bam.py │ │ ├── resnet_cbam.py │ │ ├── resnet_old.py │ │ ├── resnet_se.py │ │ ├── resnet_sge.py │ │ ├── resnet_sk.py │ │ ├── resnet_ws.py │ │ ├── shufflenetv2.py │ │ ├── shufflenetv2_bng2.py │ │ ├── shufflenetv2_gl4gbn.py │ │ └── shufflenetv2_se.py │ └── layers.py ├── pretrain_test.md ├── train.md └── utils │ ├── __init__.py │ ├── eval.py │ ├── images │ ├── cifar.png │ └── imagenet.png │ ├── logger.py │ ├── misc.py │ ├── progress │ ├── .gitignore │ ├── LICENSE │ ├── MANIFEST.in │ ├── README.rst │ ├── demo.gif │ ├── progress │ │ ├── __init__.py │ │ ├── bar.py │ │ ├── counter.py │ │ ├── helpers.py │ │ └── spinner.py │ ├── setup.py │ └── test_progress.py │ └── visualize.py ├── detection ├── .eggs │ ├── README.txt │ └── pytest_runner-5.1-py3.6.egg │ │ ├── EGG-INFO │ │ ├── LICENSE │ │ ├── PKG-INFO │ │ ├── RECORD │ │ ├── WHEEL │ │ ├── entry_points.txt │ │ ├── requires.txt │ │ └── top_level.txt │ │ └── ptr.py ├── .gitignore ├── INSTALL.md ├── README.md ├── compile.sh ├── local_configs │ ├── __pycache__ │ │ ├── cascade_rcnn_r101_fpn_20e_pretrain_bnss5v_resnet101.cpython-36.pyc │ │ ├── cascade_rcnn_r101_fpn_2x.cpython-36.pyc │ │ ├── cascade_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── cascade_rcnn_r50_fpn_20e.cpython-36.pyc │ │ ├── cascade_rcnn_r50_fpn_20e_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── faster_rcnn_r101_fpn_2x.cpython-36.pyc │ │ ├── faster_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x_addbottlenecksge64.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x_addbottlenecksge64_frozen0.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x_addfpnfinal64.cpython-36.pyc │ │ ├── 
faster_rcnn_r50_fpn_1x_addstage64_fpnfinal64.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x_addstagesge64.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x_addbottleneck64.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x_addstagesge64.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x_pretrain_gn4v_resnet50.cpython-36.pyc │ │ ├── faster_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc │ │ ├── mask_rcnn_r101_fpn_2x.cpython-36.pyc │ │ ├── mask_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc │ │ ├── mask_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── mask_rcnn_r50_fpn_2x.cpython-36.pyc │ │ ├── mask_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── mask_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc │ │ ├── retinanet_r101_fpn_2x.cpython-36.pyc │ │ ├── retinanet_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc │ │ ├── retinanet_r101_fpn_2x_pretrain_cbam_resnet101.cpython-36.pyc │ │ ├── retinanet_r101_fpn_2x_pretrain_gc_resnet101.cpython-36.pyc │ │ ├── retinanet_r101_fpn_2x_pretrain_se_resnet101.cpython-36.pyc │ │ ├── retinanet_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_nopre_bnss5v_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_nopre_se_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_bam_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_cbam_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_gc_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc │ │ ├── retinanet_r50_fpn_2x_pretrain_sk_resnet50.cpython-36.pyc │ │ ├── retinanet_x101_32x4d_fpn_1x.cpython-36.pyc │ │ ├── retinanet_x101_32x4d_fpn_2x.cpython-36.pyc │ │ ├── 
retinanet_x101_64x4d_fpn_1x.cpython-36.pyc │ │ └── ssd512_coco.cpython-36.pyc │ ├── cascade_rcnn_r101_fpn_20e.py │ ├── cascade_rcnn_r101_fpn_20e_pretrain_sge_resnet101.py │ ├── cascade_rcnn_r50_fpn_20e.py │ ├── cascade_rcnn_r50_fpn_20e_pretrain_sge_resnet50.py │ ├── faster_rcnn_r101_fpn_2x.py │ ├── faster_rcnn_r101_fpn_2x_pretrain_sge_resnet101.py │ ├── faster_rcnn_r50_fpn_2x.py │ ├── faster_rcnn_r50_fpn_2x_pretrain_sge_resnet50.py │ ├── mask_rcnn_r101_fpn_2x.py │ ├── mask_rcnn_r101_fpn_2x_pretrain_sge_resnet101.py │ ├── mask_rcnn_r50_fpn_2x.py │ ├── mask_rcnn_r50_fpn_2x_pretrain_sge_resnet50.py │ ├── retinanet_r101_fpn_2x.py │ ├── retinanet_r101_fpn_2x_pretrain_cbam_resnet101.py │ ├── retinanet_r101_fpn_2x_pretrain_gc_resnet101.py │ ├── retinanet_r101_fpn_2x_pretrain_se_resnet101.py │ ├── retinanet_r101_fpn_2x_pretrain_sge_resnet101.py │ ├── retinanet_r50_fpn_2x.py │ ├── retinanet_r50_fpn_2x_pretrain_bam_resnet50.py │ ├── retinanet_r50_fpn_2x_pretrain_cbam_resnet50.py │ ├── retinanet_r50_fpn_2x_pretrain_gc_resnet50.py │ ├── retinanet_r50_fpn_2x_pretrain_se_resnet50.py │ ├── retinanet_r50_fpn_2x_pretrain_sge_resnet50.py │ └── retinanet_r50_fpn_2x_pretrain_sk_resnet50.py ├── mmdet │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── version.cpython-36.pyc │ ├── apis │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── env.cpython-36.pyc │ │ │ ├── inference.cpython-36.pyc │ │ │ └── train.cpython-36.pyc │ │ ├── env.py │ │ ├── inference.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ ├── anchor │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── anchor_generator.cpython-36.pyc │ │ │ │ └── anchor_target.cpython-36.pyc │ │ │ ├── anchor_generator.py │ │ │ └── anchor_target.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── assign_sampling.cpython-36.pyc │ │ │ │ 
├── bbox_target.cpython-36.pyc │ │ │ │ ├── geometry.cpython-36.pyc │ │ │ │ └── transforms.cpython-36.pyc │ │ │ ├── assign_sampling.py │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── assign_result.cpython-36.pyc │ │ │ │ │ ├── base_assigner.cpython-36.pyc │ │ │ │ │ └── max_iou_assigner.cpython-36.pyc │ │ │ │ ├── assign_result.py │ │ │ │ ├── base_assigner.py │ │ │ │ └── max_iou_assigner.py │ │ │ ├── bbox_target.py │ │ │ ├── geometry.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── base_sampler.cpython-36.pyc │ │ │ │ │ ├── combined_sampler.cpython-36.pyc │ │ │ │ │ ├── instance_balanced_pos_sampler.cpython-36.pyc │ │ │ │ │ ├── iou_balanced_neg_sampler.cpython-36.pyc │ │ │ │ │ ├── ohem_sampler.cpython-36.pyc │ │ │ │ │ ├── pseudo_sampler.cpython-36.pyc │ │ │ │ │ ├── random_sampler.cpython-36.pyc │ │ │ │ │ └── sampling_result.cpython-36.pyc │ │ │ │ ├── base_sampler.py │ │ │ │ ├── combined_sampler.py │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ ├── ohem_sampler.py │ │ │ │ ├── pseudo_sampler.py │ │ │ │ ├── random_sampler.py │ │ │ │ └── sampling_result.py │ │ │ └── transforms.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── bbox_overlaps.cpython-36.pyc │ │ │ │ ├── class_names.cpython-36.pyc │ │ │ │ ├── coco_utils.cpython-36.pyc │ │ │ │ ├── eval_hooks.cpython-36.pyc │ │ │ │ ├── mean_ap.cpython-36.pyc │ │ │ │ └── recall.cpython-36.pyc │ │ │ ├── bbox_overlaps.py │ │ │ ├── class_names.py │ │ │ ├── coco_utils.py │ │ │ ├── eval_hooks.py │ │ │ ├── mean_ap.py │ │ │ └── recall.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── losses.cpython-36.pyc │ │ │ └── losses.py │ │ ├── mask │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── 
mask_target.cpython-36.pyc │ │ │ │ └── utils.cpython-36.pyc │ │ │ ├── mask_target.py │ │ │ └── utils.py │ │ ├── post_processing │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── bbox_nms.cpython-36.pyc │ │ │ │ └── merge_augs.cpython-36.pyc │ │ │ ├── bbox_nms.py │ │ │ └── merge_augs.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── dist_utils.cpython-36.pyc │ │ │ └── misc.cpython-36.pyc │ │ │ ├── dist_utils.py │ │ │ └── misc.py │ ├── datasets │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── coco.cpython-36.pyc │ │ │ ├── concat_dataset.cpython-36.pyc │ │ │ ├── custom.cpython-36.pyc │ │ │ ├── extra_aug.cpython-36.pyc │ │ │ ├── repeat_dataset.cpython-36.pyc │ │ │ ├── transforms.cpython-36.pyc │ │ │ ├── utils.cpython-36.pyc │ │ │ ├── voc.cpython-36.pyc │ │ │ └── xml_style.cpython-36.pyc │ │ ├── coco.py │ │ ├── concat_dataset.py │ │ ├── custom.py │ │ ├── extra_aug.py │ │ ├── loader │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── build_loader.cpython-36.pyc │ │ │ │ └── sampler.cpython-36.pyc │ │ │ ├── build_loader.py │ │ │ └── sampler.py │ │ ├── repeat_dataset.py │ │ ├── transforms.py │ │ ├── utils.py │ │ ├── voc.py │ │ └── xml_style.py │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── builder.cpython-36.pyc │ │ │ └── registry.cpython-36.pyc │ │ ├── anchor_heads │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── anchor_head.cpython-36.pyc │ │ │ │ ├── retina_head.cpython-36.pyc │ │ │ │ ├── rpn_head.cpython-36.pyc │ │ │ │ └── ssd_head.cpython-36.pyc │ │ │ ├── anchor_head.py │ │ │ ├── retina_head.py │ │ │ ├── rpn_head.py │ │ │ └── ssd_head.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── resnet.cpython-36.pyc │ │ │ │ ├── resnext.cpython-36.pyc │ │ │ │ 
└── ssd_vgg.cpython-36.pyc │ │ │ ├── bam.py │ │ │ ├── cbam.py │ │ │ ├── global_context.py │ │ │ ├── resnet.py │ │ │ ├── resnet_bam.py │ │ │ ├── resnet_cbam.py │ │ │ ├── resnet_gc.py │ │ │ ├── resnet_se.py │ │ │ ├── resnet_sge.py │ │ │ ├── resnet_sk.py │ │ │ ├── resnext.py │ │ │ └── ssd_vgg.py │ │ ├── bbox_heads │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── bbox_head.cpython-36.pyc │ │ │ │ └── convfc_bbox_head.cpython-36.pyc │ │ │ ├── bbox_head.py │ │ │ └── convfc_bbox_head.py │ │ ├── builder.py │ │ ├── detectors │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── base.cpython-36.pyc │ │ │ │ ├── cascade_rcnn.cpython-36.pyc │ │ │ │ ├── fast_rcnn.cpython-36.pyc │ │ │ │ ├── faster_rcnn.cpython-36.pyc │ │ │ │ ├── mask_rcnn.cpython-36.pyc │ │ │ │ ├── retinanet.cpython-36.pyc │ │ │ │ ├── rpn.cpython-36.pyc │ │ │ │ ├── single_stage.cpython-36.pyc │ │ │ │ ├── test_mixins.cpython-36.pyc │ │ │ │ └── two_stage.cpython-36.pyc │ │ │ ├── base.py │ │ │ ├── cascade_rcnn.py │ │ │ ├── fast_rcnn.py │ │ │ ├── faster_rcnn.py │ │ │ ├── mask_rcnn.py │ │ │ ├── retinanet.py │ │ │ ├── rpn.py │ │ │ ├── single_stage.py │ │ │ ├── test_mixins.py │ │ │ └── two_stage.py │ │ ├── mask_heads │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── fcn_mask_head.cpython-36.pyc │ │ │ └── fcn_mask_head.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── fpn.cpython-36.pyc │ │ │ ├── fpn.py │ │ │ └── fpn_sge.py │ │ ├── registry.py │ │ ├── roi_extractors │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── single_level.cpython-36.pyc │ │ │ └── single_level.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── conv_module.cpython-36.pyc │ │ │ ├── norm.cpython-36.pyc │ │ │ └── weight_init.cpython-36.pyc │ │ │ ├── conv_module.py │ │ │ ├── norm.py 
│ │ │ └── weight_init.py │ ├── ops │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ ├── dcn │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ └── __init__.cpython-36.pyc │ │ │ ├── deform_conv_cuda.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── deform_pool_cuda.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── deform_conv.cpython-36.pyc │ │ │ │ │ └── deform_pool.cpython-36.pyc │ │ │ │ ├── deform_conv.py │ │ │ │ └── deform_pool.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── deform_conv.cpython-36.pyc │ │ │ │ │ └── deform_pool.cpython-36.pyc │ │ │ │ ├── deform_conv.py │ │ │ │ └── deform_pool.py │ │ │ ├── setup.py │ │ │ └── src │ │ │ │ ├── deform_conv_cuda.cpp │ │ │ │ ├── deform_conv_cuda_kernel.cu │ │ │ │ ├── deform_pool_cuda.cpp │ │ │ │ └── deform_pool_cuda_kernel.cu │ │ ├── nms │ │ │ ├── .gitignore │ │ │ ├── Makefile │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── nms_wrapper.cpython-36.pyc │ │ │ ├── cpu_nms.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── cpu_nms.pyx │ │ │ ├── cpu_soft_nms.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── cpu_soft_nms.pyx │ │ │ ├── gpu_nms.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── gpu_nms.hpp │ │ │ ├── gpu_nms.pyx │ │ │ ├── nms_kernel.cu │ │ │ ├── nms_wrapper.py │ │ │ └── setup.py │ │ ├── roi_align │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ └── __init__.cpython-36.pyc │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ └── roi_align.cpython-36.pyc │ │ │ │ └── roi_align.py │ │ │ ├── gradcheck.py │ │ │ ├── modules │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ └── roi_align.cpython-36.pyc │ │ │ │ └── roi_align.py │ │ │ ├── roi_align_cuda.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── setup.py 
│ │ │ └── src │ │ │ │ ├── roi_align_cuda.cpp │ │ │ │ └── roi_align_kernel.cu │ │ └── roi_pool │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── roi_pool.cpython-36.pyc │ │ │ └── roi_pool.py │ │ │ ├── gradcheck.py │ │ │ ├── modules │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── roi_pool.cpython-36.pyc │ │ │ └── roi_pool.py │ │ │ ├── roi_pool_cuda.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── setup.py │ │ │ └── src │ │ │ ├── roi_pool_cuda.cpp │ │ │ └── roi_pool_kernel.cu │ └── version.py ├── setup.py ├── tools │ ├── __init__.py │ ├── __pycache__ │ │ └── flops_counter.cpython-36.pyc │ ├── coco_eval.py │ ├── convert_datasets │ │ └── pascal_voc.py │ ├── dist_train.sh │ ├── test.py │ ├── train.py │ └── voc_eval.py └── val.md └── pretrain_log ├── bam_resnet101.log.txt ├── bam_resnet50.log.txt ├── cbam_resnet101.log.txt ├── cbam_resnet50.log.txt ├── old_resnet101.log.txt ├── old_resnet50.log.txt ├── se_resnet101.log.txt ├── sge_resnet101.log.txt ├── sge_resnet50.log.txt ├── shufflenetv2_1x.log.txt ├── sk_resnet101.log.txt └── sk_resnet50.log.txt /.gitignore: -------------------------------------------------------------------------------- 1 | pretrain 2 | pretrain/* 3 | -------------------------------------------------------------------------------- /classification/.gitignore: -------------------------------------------------------------------------------- 1 | checkpoints/* 2 | checkpoint/* 3 | job* 4 | logs/* 5 | __pycache__ 6 | -------------------------------------------------------------------------------- /classification/models/imagenet/.gitignore: -------------------------------------------------------------------------------- 1 | resnet_gl.py 2 | -------------------------------------------------------------------------------- /classification/models/imagenet/__init__.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .resnet_old import * 4 | from .resnet_sge import * 5 | from .resnet_se import * 6 | from .resnet_cbam import * 7 | from .resnet_bam import * 8 | from .resnet_sk import * 9 | 10 | from .resnet_ws import * 11 | 12 | from .shufflenetv2 import * 13 | -------------------------------------------------------------------------------- /classification/models/imagenet/common_head.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.utils.model_zoo as model_zoo 3 | from torch.nn.parameter import Parameter 4 | import torch 5 | import torch.nn.functional as F 6 | from torch.nn import init 7 | from torch.autograd import Variable 8 | from collections import OrderedDict 9 | import math 10 | 11 | -------------------------------------------------------------------------------- /classification/models/layers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn.parameter import Parameter 4 | from torch.nn import functional as F 5 | from torch.nn import init 6 | import math 7 | 8 | class Conv2d(nn.Conv2d): 9 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, 10 | padding=0, dilation=1, groups=1, bias=True): 11 | super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, 12 | padding, dilation, groups, bias) 13 | 14 | def forward(self, x): 15 | # return super(Conv2d, self).forward(x) 16 | weight = self.weight 17 | weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, 18 | keepdim=True).mean(dim=3, keepdim=True) 19 | weight = weight - weight_mean 20 | std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5 21 | weight = weight / std.expand_as(weight) 22 | return F.conv2d(x, weight, self.bias, self.stride, 23 | self.padding, 
self.dilation, self.groups) 24 | 25 | 26 | class A1Conv2d(nn.Conv2d): 27 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, 28 | padding=0, dilation=1, groups=1, bias=True): 29 | super(A1Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, 30 | padding, dilation, groups, bias) 31 | 32 | sz = self.weight.size() 33 | d = 1.0 34 | for v in sz: 35 | d *= v 36 | print('self.d = ', d, '->', math.sqrt(d)) 37 | self.d = math.sqrt(d) 38 | 39 | def forward(self, x): 40 | # return super(Conv2d, self).forward(x) 41 | weight = self.weight 42 | 43 | weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, 44 | keepdim=True).mean(dim=3, keepdim=True) 45 | weight = weight - weight_mean 46 | std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) * self.d + 1e-5 47 | weight = weight / std.expand_as(weight) 48 | return F.conv2d(x, weight, self.bias, self.stride, 49 | self.padding, self.dilation, self.groups) 50 | 51 | 52 | def BatchNorm2d(num_features): 53 | return nn.GroupNorm(num_channels=num_features, num_groups=32) 54 | 55 | -------------------------------------------------------------------------------- /classification/pretrain_test.md: -------------------------------------------------------------------------------- 1 | ## Examples: 2 | ``` 3 | python -m torch.distributed.launch --nproc_per_node=2 imagenet_mobile.py -a shufflenetv2_1x --data /share1/classification_data/imagenet1k/ -e --resume ../pretrain/shufflenetv2_1x.pth.tar --test-batch 100 --opt-level O0 4 | 5 | 6 | python -W ignore imagenet.py -a sge_resnet101 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/sge_resnet101.pth.tar 7 | 8 | python -W ignore imagenet.py -a old_resnet50 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/old_resnet50.pth.tar 9 | 10 | python -W ignore imagenet.py -a old_resnet101 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/old_resnet101.pth.tar 
11 | 12 | python -W ignore imagenet.py -a sk_resnet50 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/sk_resnet50.pth.tar 13 | 14 | python -W ignore imagenet.py -a sk_resnet101 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/sk_resnet101.pth.tar 15 | 16 | python -W ignore imagenet.py -a cbam_resnet50 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/cbam_resnet50.pth.tar 17 | 18 | python -W ignore imagenet.py -a cbam_resnet101 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/cbam_resnet101.pth.tar 19 | 20 | python -W ignore imagenet.py -a bam_resnet50 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/bam_resnet50.pth.tar 21 | 22 | python -W ignore imagenet.py -a bam_resnet101 --data /share1/classification_data/imagenet1k/ --gpu-id 0 -e --resume ../pretrain/bam_resnet101.pth.tar 23 | ``` 24 | -------------------------------------------------------------------------------- /classification/train.md: -------------------------------------------------------------------------------- 1 | 2 | ## Small Models 3 | ### ShuffleNetV2 4 | ``` 5 | python -m torch.distributed.launch --nproc_per_node=8 imagenet_mobile.py --cos -a shufflenetv2_1x --data /share1/classification_data/imagenet1k/ --epochs 300 --wd 4e-5 --gamma 0.1 -c checkpoints/imagenet/shufflenetv2_1x --train-batch 128 --opt-level O0 --nowd-bn 6 | ``` 7 | 8 | 9 | ## Large Models 10 | ### SGE-ResNet 11 | ``` 12 | python -W ignore imagenet.py -a sge_resnet101 --data /share1/classification_data/imagenet1k/ --epochs 100 --schedule 30 60 90 --gamma 0.1 -c checkpoints/imagenet/sge_resnet101 --gpu-id 0,1,2,3,4,5,6,7 13 | ``` 14 | or faster 15 | ``` 16 | python -m torch.distributed.launch --nproc_per_node=8 imagenet_fast.py -a sge_resnet50 --data /share1/classification_data/imagenet1k/ --epochs 100 --schedule 30 60 90 --wd 1e-4 --gamma 0.1 -c 
checkpoints/imagenet/sge_resnet50 --train-batch 32 --opt-level O0 --wd-all --label-smoothing 0. --warmup 0 17 | ``` 18 | 19 | ### WS-ResNet with e-shifted L2 regularizer, e = 1e-3 20 | ``` 21 | python -m torch.distributed.launch --nproc_per_node=8 imagenet_fast.py -a ws_resnet50 --data /share1/public/public/imagenet1k/ --epochs 100 --schedule 30 60 90 --wd 1e-4 --gamma 0.1 -c checkpoints/imagenet/es1e-3_ws_resnet50 --train-batch 32 --opt-level O0 --label-smoothing 0. --warmup 0 --nowd-conv --mineps 1e-3 --el2 22 | ``` 23 | -------------------------------------------------------------------------------- /classification/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Useful utils 2 | """ 3 | from .misc import * 4 | from .logger import * 5 | from .visualize import * 6 | from .eval import * 7 | 8 | # progress bar 9 | import os, sys 10 | sys.path.append(os.path.join(os.path.dirname(__file__), "progress")) 11 | from progress.bar import Bar as Bar -------------------------------------------------------------------------------- /classification/utils/eval.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | 3 | __all__ = ['accuracy'] 4 | 5 | def accuracy(output, target, topk=(1,)): 6 | """Computes the precision@k for the specified values of k""" 7 | maxk = max(topk) 8 | batch_size = target.size(0) 9 | 10 | _, pred = output.topk(maxk, 1, True, True) 11 | pred = pred.t() 12 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 13 | 14 | res = [] 15 | for k in topk: 16 | correct_k = correct[:k].view(-1).float().sum(0) 17 | res.append(correct_k.mul_(100.0 / batch_size)) 18 | return res -------------------------------------------------------------------------------- /classification/utils/images/cifar.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/classification/utils/images/cifar.png -------------------------------------------------------------------------------- /classification/utils/images/imagenet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/classification/utils/images/imagenet.png -------------------------------------------------------------------------------- /classification/utils/misc.py: -------------------------------------------------------------------------------- 1 | '''Some helper functions for PyTorch, including: 2 | - get_mean_and_std: calculate the mean and std value of dataset. 3 | - msr_init: net parameter initialization. 4 | - progress_bar: progress bar mimic xlua.progress. 5 | ''' 6 | import errno 7 | import os 8 | import sys 9 | import time 10 | import math 11 | 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | from torch.autograd import Variable 15 | 16 | __all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter'] 17 | 18 | 19 | def get_mean_and_std(dataset): 20 | '''Compute the mean and std value of dataset.''' 21 | dataloader = trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2) 22 | 23 | mean = torch.zeros(3) 24 | std = torch.zeros(3) 25 | print('==> Computing mean and std..') 26 | for inputs, targets in dataloader: 27 | for i in range(3): 28 | mean[i] += inputs[:,i,:,:].mean() 29 | std[i] += inputs[:,i,:,:].std() 30 | mean.div_(len(dataset)) 31 | std.div_(len(dataset)) 32 | return mean, std 33 | 34 | def init_params(net): 35 | '''Init layer parameters.''' 36 | for m in net.modules(): 37 | if isinstance(m, nn.Conv2d): 38 | init.kaiming_normal(m.weight, mode='fan_out') 39 | if m.bias: 40 | init.constant(m.bias, 0) 41 | elif isinstance(m, nn.BatchNorm2d): 42 | init.constant(m.weight, 
1) 43 | init.constant(m.bias, 0) 44 | elif isinstance(m, nn.Linear): 45 | init.normal(m.weight, std=1e-3) 46 | if m.bias: 47 | init.constant(m.bias, 0) 48 | 49 | def mkdir_p(path): 50 | '''make dir if not exist''' 51 | try: 52 | os.makedirs(path) 53 | except OSError as exc: # Python >2.5 54 | if exc.errno == errno.EEXIST and os.path.isdir(path): 55 | pass 56 | else: 57 | raise 58 | 59 | class AverageMeter(object): 60 | """Computes and stores the average and current value 61 | Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262 62 | """ 63 | def __init__(self): 64 | self.reset() 65 | 66 | def reset(self): 67 | self.val = 0 68 | self.avg = 0 69 | self.sum = 0 70 | self.count = 0 71 | 72 | def update(self, val, n=1): 73 | self.val = val 74 | self.sum += val * n 75 | self.count += n 76 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /classification/utils/progress/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.egg-info 3 | build/ 4 | dist/ 5 | -------------------------------------------------------------------------------- /classification/utils/progress/LICENSE: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012 Giorgos Verigakis 2 | # 3 | # Permission to use, copy, modify, and distribute this software for any 4 | # purpose with or without fee is hereby granted, provided that the above 5 | # copyright notice and this permission notice appear in all copies. 6 | # 7 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | # MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | -------------------------------------------------------------------------------- /classification/utils/progress/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE 2 | -------------------------------------------------------------------------------- /classification/utils/progress/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/classification/utils/progress/demo.gif -------------------------------------------------------------------------------- /classification/utils/progress/progress/bar.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Copyright (c) 2012 Giorgos Verigakis 4 | # 5 | # Permission to use, copy, modify, and distribute this software for any 6 | # purpose with or without fee is hereby granted, provided that the above 7 | # copyright notice and this permission notice appear in all copies. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
16 | 17 | from __future__ import unicode_literals 18 | from . import Progress 19 | from .helpers import WritelnMixin 20 | 21 | 22 | class Bar(WritelnMixin, Progress): 23 | width = 32 24 | message = '' 25 | suffix = '%(index)d/%(max)d' 26 | bar_prefix = ' |' 27 | bar_suffix = '| ' 28 | empty_fill = ' ' 29 | fill = '#' 30 | hide_cursor = True 31 | 32 | def update(self): 33 | filled_length = int(self.width * self.progress) 34 | empty_length = self.width - filled_length 35 | 36 | message = self.message % self 37 | bar = self.fill * filled_length 38 | empty = self.empty_fill * empty_length 39 | suffix = self.suffix % self 40 | line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, 41 | suffix]) 42 | self.writeln(line) 43 | 44 | 45 | class ChargingBar(Bar): 46 | suffix = '%(percent)d%%' 47 | bar_prefix = ' ' 48 | bar_suffix = ' ' 49 | empty_fill = '∙' 50 | fill = '█' 51 | 52 | 53 | class FillingSquaresBar(ChargingBar): 54 | empty_fill = '▢' 55 | fill = '▣' 56 | 57 | 58 | class FillingCirclesBar(ChargingBar): 59 | empty_fill = '◯' 60 | fill = '◉' 61 | 62 | 63 | class IncrementalBar(Bar): 64 | phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█') 65 | 66 | def update(self): 67 | nphases = len(self.phases) 68 | filled_len = self.width * self.progress 69 | nfull = int(filled_len) # Number of full chars 70 | phase = int((filled_len - nfull) * nphases) # Phase of last char 71 | nempty = self.width - nfull # Number of empty chars 72 | 73 | message = self.message % self 74 | bar = self.phases[-1] * nfull 75 | current = self.phases[phase] if phase > 0 else '' 76 | empty = self.empty_fill * max(0, nempty - len(current)) 77 | suffix = self.suffix % self 78 | line = ''.join([message, self.bar_prefix, bar, current, empty, 79 | self.bar_suffix, suffix]) 80 | self.writeln(line) 81 | 82 | 83 | class PixelBar(IncrementalBar): 84 | phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿') 85 | 86 | 87 | class ShadyBar(IncrementalBar): 88 | phases = (' ', '░', '▒', '▓', '█') 89 | 
# -----------------------------------------------------------------------------
# /classification/utils/progress/progress/counter.py
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

from __future__ import unicode_literals
from . import Infinite, Progress
from .helpers import WriteMixin


class Counter(WriteMixin, Infinite):
    """Counts up: rewrites the current index in place."""

    message = ''
    hide_cursor = True

    def update(self):
        self.write(str(self.index))


class Countdown(WriteMixin, Progress):
    """Counts down: rewrites the remaining step count in place."""

    hide_cursor = True

    def update(self):
        self.write(str(self.remaining))


class Stack(WriteMixin, Progress):
    """Single-character gauge rising through ``phases`` with progress."""

    phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
    hide_cursor = True

    def update(self):
        count = len(self.phases)
        # Map progress in [0, 1] onto a phase index, clamping 1.0 down
        # to the last glyph.
        chosen = int(self.progress * count)
        if chosen > count - 1:
            chosen = count - 1
        self.write(self.phases[chosen])


class Pie(Stack):
    """Stack variant drawn as a filling pie glyph."""

    phases = ('○', '◔', '◑', '◕', '●')


# -----------------------------------------------------------------------------
# /classification/utils/progress/progress/helpers.py
# -----------------------------------------------------------------------------
# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function


# ANSI escape sequences toggling terminal cursor visibility.
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'


class WriteMixin(object):
    """Rewrites a single in-place field on a tty via backspace characters."""

    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WriteMixin, self).__init__(**kwargs)
        self._width = 0  # widest string written so far, for clean overwrite
        if message:
            self.message = message

        if self.file.isatty():
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=self.file)
            print(self.message, end='', file=self.file)
            self.file.flush()

    def write(self, s):
        # Only a tty can be rewritten in place; otherwise stay silent.
        if self.file.isatty():
            back = '\b' * self._width
            # Pad to the widest previous value so stale characters are erased.
            padded = s.ljust(self._width)
            print(back + padded, end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def finish(self):
        if self.file.isatty() and self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)


class WritelnMixin(object):
    """Rewrites a whole line on a tty using carriage return + erase-line."""

    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WritelnMixin, self).__init__(**kwargs)
        if message:
            self.message = message

        if self.file.isatty() and self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)

    def clearln(self):
        if self.file.isatty():
            # Carriage return, then "erase to end of line".
            print('\r\x1b[K', end='', file=self.file)

    def writeln(self, line):
        if self.file.isatty():
            self.clearln()
            print(line, end='', file=self.file)
            self.file.flush()

    def finish(self):
        if self.file.isatty():
            print(file=self.file)
            if self.hide_cursor:
                print(SHOW_CURSOR, end='', file=self.file)


from signal import signal, SIGINT
from sys import exit


class SigIntMixin(object):
    """Registers a signal handler that calls finish on SIGINT"""

    def __init__(self, *args, **kwargs):
        super(SigIntMixin, self).__init__(*args, **kwargs)
        signal(SIGINT, self._sigint_handler)

    def _sigint_handler(self, signum, frame):
        # Restore the terminal (newline, cursor) before exiting.
        self.finish()
        exit(0)


# -----------------------------------------------------------------------------
# /classification/utils/progress/progress/spinner.py
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-

# Copyright (c) 2012 Giorgos Verigakis
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import Infinite 19 | from .helpers import WriteMixin 20 | 21 | 22 | class Spinner(WriteMixin, Infinite): 23 | message = '' 24 | phases = ('-', '\\', '|', '/') 25 | hide_cursor = True 26 | 27 | def update(self): 28 | i = self.index % len(self.phases) 29 | self.write(self.phases[i]) 30 | 31 | 32 | class PieSpinner(Spinner): 33 | phases = ['◷', '◶', '◵', '◴'] 34 | 35 | 36 | class MoonSpinner(Spinner): 37 | phases = ['◑', '◒', '◐', '◓'] 38 | 39 | 40 | class LineSpinner(Spinner): 41 | phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] 42 | 43 | class PixelSpinner(Spinner): 44 | phases = ['⣾','⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] 45 | -------------------------------------------------------------------------------- /classification/utils/progress/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | import progress 6 | 7 | 8 | setup( 9 | name='progress', 10 | version=progress.__version__, 11 | description='Easy to use progress bars', 12 | long_description=open('README.rst').read(), 13 | author='Giorgos Verigakis', 14 | author_email='verigak@gmail.com', 15 | url='http://github.com/verigak/progress/', 16 | license='ISC', 17 | packages=['progress'], 18 | classifiers=[ 19 | 'Environment :: Console', 20 | 'Intended Audience :: Developers', 21 | 'License :: OSI Approved :: ISC License (ISCL)', 22 | 'Programming Language :: Python :: 2.6', 23 | 'Programming Language :: Python :: 2.7', 24 | 'Programming Language :: Python :: 3.3', 25 | 'Programming Language :: Python :: 3.4', 26 | 'Programming Language :: Python :: 3.5', 27 | 'Programming Language :: Python :: 3.6', 28 | ] 29 | ) 30 | -------------------------------------------------------------------------------- /classification/utils/progress/test_progress.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import random 6 | 
import time 7 | 8 | from progress.bar import (Bar, ChargingBar, FillingSquaresBar, 9 | FillingCirclesBar, IncrementalBar, PixelBar, 10 | ShadyBar) 11 | from progress.spinner import (Spinner, PieSpinner, MoonSpinner, LineSpinner, 12 | PixelSpinner) 13 | from progress.counter import Counter, Countdown, Stack, Pie 14 | 15 | 16 | def sleep(): 17 | t = 0.01 18 | t += t * random.uniform(-0.1, 0.1) # Add some variance 19 | time.sleep(t) 20 | 21 | 22 | for bar_cls in (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar): 23 | suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]' 24 | bar = bar_cls(bar_cls.__name__, suffix=suffix) 25 | for i in bar.iter(range(200)): 26 | sleep() 27 | 28 | for bar_cls in (IncrementalBar, PixelBar, ShadyBar): 29 | suffix = '%(percent)d%% [%(elapsed_td)s / %(eta)d / %(eta_td)s]' 30 | bar = bar_cls(bar_cls.__name__, suffix=suffix) 31 | for i in bar.iter(range(200)): 32 | sleep() 33 | 34 | for spin in (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner): 35 | for i in spin(spin.__name__ + ' ').iter(range(100)): 36 | sleep() 37 | print() 38 | 39 | for singleton in (Counter, Countdown, Stack, Pie): 40 | for i in singleton(singleton.__name__ + ' ').iter(range(100)): 41 | sleep() 42 | print() 43 | 44 | bar = IncrementalBar('Random', suffix='%(index)d') 45 | for i in range(100): 46 | bar.goto(random.randint(0, 100)) 47 | sleep() 48 | bar.finish() 49 | -------------------------------------------------------------------------------- /detection/.eggs/README.txt: -------------------------------------------------------------------------------- 1 | This directory contains eggs that were downloaded by setuptools to build, test, and run plug-ins. 2 | 3 | This directory caches those eggs to prevent repeated downloads. 4 | 5 | However, it is safe to delete this directory. 
6 | 7 | -------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Jason R. Coombs 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
8 | -------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/RECORD: -------------------------------------------------------------------------------- 1 | ptr.py,sha256=6BlnIXLmJ7aVbpXERfPcRO3N3Mn7mXP1jtbeC5GvK8E,6867 2 | pytest_runner-5.1.dist-info/LICENSE,sha256=pV4v_ptEmY5iHVHYwJS-0JrMS1I27nPX3zlaM7o8GP0,1050 3 | pytest_runner-5.1.dist-info/METADATA,sha256=fGTMJUwoeOKEh-ZbS2PopivjJqlTi3xr_vAzntWZBV4,5509 4 | pytest_runner-5.1.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110 5 | pytest_runner-5.1.dist-info/entry_points.txt,sha256=JLlBw8SyLCUnYguY0hs5dVnA9dMbhuEVPnDjojJbXHw,59 6 | pytest_runner-5.1.dist-info/top_level.txt,sha256=DPzHbWlKG8yq8EOD5UgEvVNDWeJRPyimrwfShwV6Iuw,4 7 | pytest_runner-5.1.dist-info/RECORD,, 8 | -------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/WHEEL: -------------------------------------------------------------------------------- 1 | Wheel-Version: 1.0 2 | Generator: bdist_wheel (0.33.4) 3 | Root-Is-Purelib: true 4 | Tag: py2-none-any 5 | Tag: py3-none-any 6 | 7 | -------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/entry_points.txt: -------------------------------------------------------------------------------- 1 | [distutils.commands] 2 | ptr = ptr:PyTest 3 | pytest = ptr:PyTest 4 | 5 | -------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/requires.txt: -------------------------------------------------------------------------------- 1 | 2 | [docs] 3 | jaraco.packaging>=3.2 4 | rst.linker>=1.9 5 | sphinx 6 | 7 | [testing] 8 | pytest!=3.7.3,>=3.5 9 | pytest-black-multipy 10 | pytest-checkdocs 11 | pytest-flake8 12 | pytest-virtualenv 13 | 
-------------------------------------------------------------------------------- /detection/.eggs/pytest_runner-5.1-py3.6.egg/EGG-INFO/top_level.txt: -------------------------------------------------------------------------------- 1 | ptr 2 | -------------------------------------------------------------------------------- /detection/.gitignore: -------------------------------------------------------------------------------- 1 | job* 2 | jobs/ 3 | work_dirs/ 4 | logs/ 5 | outs/ 6 | data/ 7 | venv/ 8 | build/ 9 | dist/ 10 | .egg/ 11 | egg/ 12 | *.egg-info/ 13 | -------------------------------------------------------------------------------- /detection/INSTALL.md: -------------------------------------------------------------------------------- 1 | ## Installation 2 | 3 | ### Requirements 4 | 5 | - Linux (tested on Ubuntu 16.04 and CentOS 7.2) 6 | - Python 3.4+ 7 | - PyTorch 1.0 8 | - Cython 9 | - [mmcv](https://github.com/open-mmlab/mmcv) >= 0.2.2 10 | 11 | ### Install mmdetection 12 | 13 | a. Install PyTorch 1.0 and torchvision following the [official instructions](https://pytorch.org/). 14 | 15 | b. Compile cuda extensions. 16 | 17 | ```shell 18 | cd mmdetection 19 | pip install cython # or "conda install cython" if you prefer conda 20 | ./compile.sh # or "PYTHON=python3 ./compile.sh" if you use system python3 without virtual environments 21 | ``` 22 | 23 | c. Install mmdetection (other dependencies will be installed automatically). 24 | 25 | ```shell 26 | python(3) setup.py install # add --user if you want to install it locally 27 | # or "pip install ." 28 | ``` 29 | 30 | Note: You need to run the last step each time you pull updates from github. 31 | The git commit id will be written to the version number and also saved in trained models. 32 | 33 | ### Prepare COCO dataset. 34 | 35 | It is recommended to symlink the dataset root to `$MMDETECTION/data`. 
36 | 37 | ``` 38 | mmdetection 39 | ├── mmdet 40 | ├── tools 41 | ├── configs 42 | ├── data 43 | │ ├── coco 44 | │ │ ├── annotations 45 | │ │ ├── train2017 46 | │ │ ├── val2017 47 | │ │ ├── test2017 48 | │ ├── VOCdevkit 49 | │ │ ├── VOC2007 50 | │ │ ├── VOC2012 51 | 52 | ``` 53 | -------------------------------------------------------------------------------- /detection/compile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | echo "Building roi align op..." 6 | cd mmdet/ops/roi_align 7 | if [ -d "build" ]; then 8 | rm -r build 9 | fi 10 | $PYTHON setup.py build_ext --inplace 11 | 12 | echo "Building roi pool op..." 13 | cd ../roi_pool 14 | if [ -d "build" ]; then 15 | rm -r build 16 | fi 17 | $PYTHON setup.py build_ext --inplace 18 | 19 | echo "Building nms op..." 20 | cd ../nms 21 | make clean 22 | make PYTHON=${PYTHON} 23 | 24 | echo "Building dcn..." 25 | cd ../dcn 26 | if [ -d "build" ]; then 27 | rm -r build 28 | fi 29 | $PYTHON setup.py build_ext --inplace 30 | -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/cascade_rcnn_r101_fpn_20e_pretrain_bnss5v_resnet101.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/cascade_rcnn_r101_fpn_20e_pretrain_bnss5v_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/cascade_rcnn_r101_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/cascade_rcnn_r101_fpn_2x.cpython-36.pyc 
-------------------------------------------------------------------------------- /detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_20e.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_20e.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_20e_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/cascade_rcnn_r50_fpn_20e_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r101_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r101_fpn_2x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addbottlenecksge64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addbottlenecksge64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addbottlenecksge64_frozen0.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addbottlenecksge64_frozen0.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addfpnfinal64.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addfpnfinal64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addstage64_fpnfinal64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addstage64_fpnfinal64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addstagesge64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_addstagesge64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x.cpython-36.pyc 
-------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_addbottleneck64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_addbottleneck64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_addstagesge64.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_addstagesge64.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_gn4v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_gn4v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/faster_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r101_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r101_fpn_2x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x.cpython-36.pyc 
-------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/mask_rcnn_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r101_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r101_fpn_2x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_bnss5v_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_cbam_resnet101.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_cbam_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_gc_resnet101.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_gc_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_se_resnet101.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r101_fpn_2x_pretrain_se_resnet101.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_1x_pretrain_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_nopre_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_nopre_bnss5v_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_nopre_se_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_nopre_se_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_bam_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_bam_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_bnss5v_resnet50.cpython-36.pyc 
-------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_cbam_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_cbam_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_gc_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_gc_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_se_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_sk_resnet50.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_r50_fpn_2x_pretrain_sk_resnet50.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_x101_32x4d_fpn_1x.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_x101_32x4d_fpn_1x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_x101_32x4d_fpn_2x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_x101_32x4d_fpn_2x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/retinanet_x101_64x4d_fpn_1x.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/retinanet_x101_64x4d_fpn_1x.cpython-36.pyc -------------------------------------------------------------------------------- /detection/local_configs/__pycache__/ssd512_coco.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/local_configs/__pycache__/ssd512_coco.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /detection/mmdet/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/__pycache__/version.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/__pycache__/version.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .env import init_dist, get_root_logger, set_random_seed 2 | from .train import train_detector 3 | from .inference import inference_detector, show_result 4 | 5 | __all__ = [ 6 | 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector', 7 | 'inference_detector', 'show_result' 8 | ] 9 | -------------------------------------------------------------------------------- /detection/mmdet/apis/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/apis/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/apis/__pycache__/env.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/apis/__pycache__/env.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/apis/__pycache__/inference.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/apis/__pycache__/inference.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/apis/__pycache__/train.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/apis/__pycache__/train.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/apis/env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | 5 | import numpy as np 6 | import torch 7 | import torch.distributed as dist 8 | import torch.multiprocessing as mp 9 | from mmcv.runner import get_dist_info 10 | 11 | 12 | def init_dist(launcher, backend='nccl', **kwargs): 13 | if mp.get_start_method(allow_none=True) is None: 14 | mp.set_start_method('spawn') 15 | if launcher == 'pytorch': 16 | _init_dist_pytorch(backend, **kwargs) 17 | elif launcher == 'mpi': 18 | _init_dist_mpi(backend, **kwargs) 19 | elif launcher == 'slurm': 20 | _init_dist_slurm(backend, **kwargs) 21 | else: 22 | raise ValueError('Invalid launcher type: {}'.format(launcher)) 23 | 24 | 25 | def _init_dist_pytorch(backend, **kwargs): 26 | # TODO: use local_rank instead of rank % num_gpus 27 | rank = int(os.environ['RANK']) 28 | num_gpus = torch.cuda.device_count() 29 | torch.cuda.set_device(rank % num_gpus) 30 | dist.init_process_group(backend=backend, **kwargs) 31 | 32 | 33 | def _init_dist_mpi(backend, **kwargs): 34 | raise NotImplementedError 35 | 36 | 37 | def _init_dist_slurm(backend, **kwargs): 38 | raise NotImplementedError 39 | 40 | 41 | def set_random_seed(seed): 42 | random.seed(seed) 43 | np.random.seed(seed) 44 | torch.manual_seed(seed) 45 | torch.cuda.manual_seed_all(seed) 46 
| 47 | 48 | def get_root_logger(log_level=logging.INFO): 49 | logger = logging.getLogger() 50 | if not logger.hasHandlers(): 51 | logging.basicConfig( 52 | format='%(asctime)s - %(levelname)s - %(message)s', 53 | level=log_level) 54 | rank, _ = get_dist_info() 55 | if rank != 0: 56 | logger.setLevel('ERROR') 57 | return logger 58 | -------------------------------------------------------------------------------- /detection/mmdet/apis/inference.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | import numpy as np 3 | import pycocotools.mask as maskUtils 4 | import torch 5 | 6 | from mmdet.core import get_classes 7 | from mmdet.datasets import to_tensor 8 | from mmdet.datasets.transforms import ImageTransform 9 | 10 | 11 | def _prepare_data(img, img_transform, cfg, device): 12 | ori_shape = img.shape 13 | img, img_shape, pad_shape, scale_factor = img_transform( 14 | img, 15 | scale=cfg.data.test.img_scale, 16 | keep_ratio=cfg.data.test.get('resize_keep_ratio', True)) 17 | img = to_tensor(img).to(device).unsqueeze(0) 18 | img_meta = [ 19 | dict( 20 | ori_shape=ori_shape, 21 | img_shape=img_shape, 22 | pad_shape=pad_shape, 23 | scale_factor=scale_factor, 24 | flip=False) 25 | ] 26 | return dict(img=[img], img_meta=[img_meta]) 27 | 28 | 29 | def _inference_single(model, img, img_transform, cfg, device): 30 | img = mmcv.imread(img) 31 | data = _prepare_data(img, img_transform, cfg, device) 32 | with torch.no_grad(): 33 | result = model(return_loss=False, rescale=True, **data) 34 | return result 35 | 36 | 37 | def _inference_generator(model, imgs, img_transform, cfg, device): 38 | for img in imgs: 39 | yield _inference_single(model, img, img_transform, cfg, device) 40 | 41 | 42 | def inference_detector(model, imgs, cfg, device='cuda:0'): 43 | img_transform = ImageTransform( 44 | size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg) 45 | model = model.to(device) 46 | model.eval() 47 | 48 | if not isinstance(imgs, 
list): 49 | return _inference_single(model, imgs, img_transform, cfg, device) 50 | else: 51 | return _inference_generator(model, imgs, img_transform, cfg, device) 52 | 53 | 54 | def show_result(img, result, dataset='coco', score_thr=0.3, out_file=None): 55 | img = mmcv.imread(img) 56 | class_names = get_classes(dataset) 57 | if isinstance(result, tuple): 58 | bbox_result, segm_result = result 59 | else: 60 | bbox_result, segm_result = result, None 61 | bboxes = np.vstack(bbox_result) 62 | # draw segmentation masks 63 | if segm_result is not None: 64 | segms = mmcv.concat_list(segm_result) 65 | inds = np.where(bboxes[:, -1] > score_thr)[0] 66 | for i in inds: 67 | color_mask = np.random.randint( 68 | 0, 256, (1, 3), dtype=np.uint8) 69 | mask = maskUtils.decode(segms[i]).astype(np.bool) 70 | img[mask] = img[mask] * 0.5 + color_mask * 0.5 71 | # draw bounding boxes 72 | labels = [ 73 | np.full(bbox.shape[0], i, dtype=np.int32) 74 | for i, bbox in enumerate(bbox_result) 75 | ] 76 | labels = np.concatenate(labels) 77 | mmcv.imshow_det_bboxes( 78 | img.copy(), 79 | bboxes, 80 | labels, 81 | class_names=class_names, 82 | score_thr=score_thr, 83 | show=out_file is None) 84 | -------------------------------------------------------------------------------- /detection/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .mask import * # noqa: F401, F403 4 | from .loss import * # noqa: F401, F403 5 | from .evaluation import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | -------------------------------------------------------------------------------- /detection/mmdet/core/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator 2 | from .anchor_target import anchor_target 3 | 4 | __all__ = ['AnchorGenerator', 'anchor_target'] 5 | -------------------------------------------------------------------------------- /detection/mmdet/core/anchor/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/anchor/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/anchor/__pycache__/anchor_generator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/anchor/__pycache__/anchor_generator.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/anchor/__pycache__/anchor_target.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/anchor/__pycache__/anchor_target.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/anchor/anchor_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class AnchorGenerator(object): 5 | 6 | def 
__init__(self, base_size, scales, ratios, scale_major=True, ctr=None): 7 | self.base_size = base_size 8 | self.scales = torch.Tensor(scales) 9 | self.ratios = torch.Tensor(ratios) 10 | self.scale_major = scale_major 11 | self.ctr = ctr 12 | self.base_anchors = self.gen_base_anchors() 13 | 14 | @property 15 | def num_base_anchors(self): 16 | return self.base_anchors.size(0) 17 | 18 | def gen_base_anchors(self): 19 | w = self.base_size 20 | h = self.base_size 21 | if self.ctr is None: 22 | x_ctr = 0.5 * (w - 1) 23 | y_ctr = 0.5 * (h - 1) 24 | else: 25 | x_ctr, y_ctr = self.ctr 26 | 27 | h_ratios = torch.sqrt(self.ratios) 28 | w_ratios = 1 / h_ratios 29 | if self.scale_major: 30 | ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1) 31 | hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1) 32 | else: 33 | ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1) 34 | hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1) 35 | 36 | base_anchors = torch.stack( 37 | [ 38 | x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), 39 | x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) 40 | ], 41 | dim=-1).round() 42 | 43 | return base_anchors 44 | 45 | def _meshgrid(self, x, y, row_major=True): 46 | xx = x.repeat(len(y)) 47 | yy = y.view(-1, 1).repeat(1, len(x)).view(-1) 48 | if row_major: 49 | return xx, yy 50 | else: 51 | return yy, xx 52 | 53 | def grid_anchors(self, featmap_size, stride=16, device='cuda'): 54 | base_anchors = self.base_anchors.to(device) 55 | 56 | feat_h, feat_w = featmap_size 57 | shift_x = torch.arange(0, feat_w, device=device) * stride 58 | shift_y = torch.arange(0, feat_h, device=device) * stride 59 | shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) 60 | shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) 61 | shifts = shifts.type_as(base_anchors) 62 | # first feat_w elements correspond to the first row of shifts 63 | # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get 64 | # shifted anchors (K, A, 4), 
reshape to (K*A, 4) 65 | 66 | all_anchors = base_anchors[None, :, :] + shifts[:, None, :] 67 | all_anchors = all_anchors.view(-1, 4) 68 | # first A rows correspond to A anchors of (0, 0) in feature map, 69 | # then (0, 1), (0, 2), ... 70 | return all_anchors 71 | 72 | def valid_flags(self, featmap_size, valid_size, device='cuda'): 73 | feat_h, feat_w = featmap_size 74 | valid_h, valid_w = valid_size 75 | assert valid_h <= feat_h and valid_w <= feat_w 76 | valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device) 77 | valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device) 78 | valid_x[:valid_w] = 1 79 | valid_y[:valid_h] = 1 80 | valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) 81 | valid = valid_xx & valid_yy 82 | valid = valid[:, None].expand( 83 | valid.size(0), self.num_base_anchors).contiguous().view(-1) 84 | return valid 85 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | from .geometry import bbox_overlaps 2 | from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult 3 | from .samplers import (BaseSampler, PseudoSampler, RandomSampler, 4 | InstanceBalancedPosSampler, IoUBalancedNegSampler, 5 | CombinedSampler, SamplingResult) 6 | from .assign_sampling import build_assigner, build_sampler, assign_and_sample 7 | from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping, 8 | bbox_mapping_back, bbox2roi, roi2bbox, bbox2result) 9 | from .bbox_target import bbox_target 10 | 11 | __all__ = [ 12 | 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', 13 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 14 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 15 | 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample', 16 | 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping', 17 | 'bbox_mapping_back', 'bbox2roi', 
'roi2bbox', 'bbox2result', 'bbox_target' 18 | ] 19 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__pycache__/assign_sampling.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/__pycache__/assign_sampling.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__pycache__/bbox_target.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/__pycache__/bbox_target.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__pycache__/geometry.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/__pycache__/geometry.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/__pycache__/transforms.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/__pycache__/transforms.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assign_sampling.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from . import assigners, samplers 4 | 5 | 6 | def build_assigner(cfg, **kwargs): 7 | if isinstance(cfg, assigners.BaseAssigner): 8 | return cfg 9 | elif isinstance(cfg, dict): 10 | return mmcv.runner.obj_from_dict( 11 | cfg, assigners, default_args=kwargs) 12 | else: 13 | raise TypeError('Invalid type {} for building a sampler'.format( 14 | type(cfg))) 15 | 16 | 17 | def build_sampler(cfg, **kwargs): 18 | if isinstance(cfg, samplers.BaseSampler): 19 | return cfg 20 | elif isinstance(cfg, dict): 21 | return mmcv.runner.obj_from_dict( 22 | cfg, samplers, default_args=kwargs) 23 | else: 24 | raise TypeError('Invalid type {} for building a sampler'.format( 25 | type(cfg))) 26 | 27 | 28 | def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg): 29 | bbox_assigner = build_assigner(cfg.assigner) 30 | bbox_sampler = build_sampler(cfg.sampler) 31 | assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, 32 | gt_labels) 33 | sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, 34 | gt_labels) 35 | return assign_result, sampling_result 36 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_assigner import BaseAssigner 2 | from .max_iou_assigner import MaxIoUAssigner 3 | from .assign_result import AssignResult 4 | 5 | __all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] 6 | -------------------------------------------------------------------------------- 
/detection/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/assign_result.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class AssignResult(object): 5 | 6 | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): 7 | self.num_gts = num_gts 8 | self.gt_inds = gt_inds 9 | self.max_overlaps = 
max_overlaps 10 | self.labels = labels 11 | 12 | def add_gt_(self, gt_labels): 13 | self_inds = torch.arange( 14 | 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) 15 | self.gt_inds = torch.cat([self_inds, self.gt_inds]) 16 | self.max_overlaps = torch.cat( 17 | [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) 18 | if self.labels is not None: 19 | self.labels = torch.cat([gt_labels, self.labels]) 20 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/bbox_target.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .transforms import bbox2delta 4 | from ..utils import multi_apply 5 | 6 | 7 | def bbox_target(pos_bboxes_list, 8 | neg_bboxes_list, 9 | pos_gt_bboxes_list, 10 | pos_gt_labels_list, 11 | cfg, 12 | reg_classes=1, 13 | target_means=[.0, .0, .0, .0], 14 | target_stds=[1.0, 1.0, 1.0, 1.0], 15 | concat=True): 16 | labels, label_weights, bbox_targets, bbox_weights = multi_apply( 17 | bbox_target_single, 18 | pos_bboxes_list, 19 | neg_bboxes_list, 20 | pos_gt_bboxes_list, 21 | pos_gt_labels_list, 22 | cfg=cfg, 23 | reg_classes=reg_classes, 24 | target_means=target_means, 25 | target_stds=target_stds) 26 | 27 | if concat: 28 | labels = torch.cat(labels, 0) 29 | label_weights = torch.cat(label_weights, 0) 30 | bbox_targets = torch.cat(bbox_targets, 0) 31 | bbox_weights = torch.cat(bbox_weights, 0) 32 | return labels, label_weights, bbox_targets, bbox_weights 33 | 34 | 35 | def 
bbox_target_single(pos_bboxes, 36 | neg_bboxes, 37 | pos_gt_bboxes, 38 | pos_gt_labels, 39 | cfg, 40 | reg_classes=1, 41 | target_means=[.0, .0, .0, .0], 42 | target_stds=[1.0, 1.0, 1.0, 1.0]): 43 | num_pos = pos_bboxes.size(0) 44 | num_neg = neg_bboxes.size(0) 45 | num_samples = num_pos + num_neg 46 | labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long) 47 | label_weights = pos_bboxes.new_zeros(num_samples) 48 | bbox_targets = pos_bboxes.new_zeros(num_samples, 4) 49 | bbox_weights = pos_bboxes.new_zeros(num_samples, 4) 50 | if num_pos > 0: 51 | labels[:num_pos] = pos_gt_labels 52 | pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight 53 | label_weights[:num_pos] = pos_weight 54 | pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means, 55 | target_stds) 56 | bbox_targets[:num_pos, :] = pos_bbox_targets 57 | bbox_weights[:num_pos, :] = 1 58 | if num_neg > 0: 59 | label_weights[-num_neg:] = 1.0 60 | if reg_classes > 1: 61 | bbox_targets, bbox_weights = expand_target(bbox_targets, bbox_weights, 62 | labels, reg_classes) 63 | 64 | return labels, label_weights, bbox_targets, bbox_weights 65 | 66 | 67 | def expand_target(bbox_targets, bbox_weights, labels, num_classes): 68 | bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0), 69 | 4 * num_classes)) 70 | bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0), 71 | 4 * num_classes)) 72 | for i in torch.nonzero(labels > 0).squeeze(-1): 73 | start, end = labels[i] * 4, (labels[i] + 1) * 4 74 | bbox_targets_expand[i, start:end] = bbox_targets[i, :] 75 | bbox_weights_expand[i, start:end] = bbox_weights[i, :] 76 | return bbox_targets_expand, bbox_weights_expand 77 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/geometry.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False): 5 | 
"""Calculate overlap between two set of bboxes. 6 | 7 | If ``is_aligned`` is ``False``, then calculate the ious between each bbox 8 | of bboxes1 and bboxes2, otherwise the ious between each aligned pair of 9 | bboxes1 and bboxes2. 10 | 11 | Args: 12 | bboxes1 (Tensor): shape (m, 4) 13 | bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n 14 | must be equal. 15 | mode (str): "iou" (intersection over union) or iof (intersection over 16 | foreground). 17 | 18 | Returns: 19 | ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1) 20 | """ 21 | 22 | assert mode in ['iou', 'iof'] 23 | 24 | rows = bboxes1.size(0) 25 | cols = bboxes2.size(0) 26 | if is_aligned: 27 | assert rows == cols 28 | 29 | if rows * cols == 0: 30 | return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols) 31 | 32 | if is_aligned: 33 | lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2] 34 | rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2] 35 | 36 | wh = (rb - lt + 1).clamp(min=0) # [rows, 2] 37 | overlap = wh[:, 0] * wh[:, 1] 38 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 39 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 40 | 41 | if mode == 'iou': 42 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 43 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 44 | ious = overlap / (area1 + area2 - overlap) 45 | else: 46 | ious = overlap / area1 47 | else: 48 | lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2] 49 | rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2] 50 | 51 | wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2] 52 | overlap = wh[:, :, 0] * wh[:, :, 1] 53 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 54 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 55 | 56 | if mode == 'iou': 57 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 58 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 59 | ious = overlap / (area1[:, None] + area2 - overlap) 60 | else: 61 | ious = overlap / (area1[:, None]) 62 | 63 | return ious 64 | 
-------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .pseudo_sampler import PseudoSampler 3 | from .random_sampler import RandomSampler 4 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 5 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 6 | from .combined_sampler import CombinedSampler 7 | from .ohem_sampler import OHEMSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-36.pyc -------------------------------------------------------------------------------- 
/detection/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/base_sampler.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | import torch 4 | 5 | from .sampling_result import SamplingResult 6 | 7 | 8 | class BaseSampler(metaclass=ABCMeta): 9 | 10 | def __init__(self, 11 | num, 12 | pos_fraction, 13 | neg_pos_ub=-1, 14 | add_gt_as_proposals=True, 15 | **kwargs): 16 | self.num = num 17 | self.pos_fraction = pos_fraction 18 | self.neg_pos_ub = neg_pos_ub 19 | self.add_gt_as_proposals = add_gt_as_proposals 20 | self.pos_sampler = self 21 | self.neg_sampler = self 22 | 23 | @abstractmethod 24 | def _sample_pos(self, assign_result, num_expected, **kwargs): 25 | pass 26 | 27 | @abstractmethod 28 | def _sample_neg(self, assign_result, num_expected, **kwargs): 29 | pass 30 | 31 | def sample(self, 32 | assign_result, 33 | bboxes, 34 | gt_bboxes, 35 | gt_labels=None, 36 | **kwargs): 37 | """Sample positive and negative bboxes. 38 | 39 | This is a simple implementation of bbox sampling given candidates, 40 | assigning results and ground truth bboxes. 
41 | 42 | Args: 43 | assign_result (:obj:`AssignResult`): Bbox assigning results. 44 | bboxes (Tensor): Boxes to be sampled from. 45 | gt_bboxes (Tensor): Ground truth bboxes. 46 | gt_labels (Tensor, optional): Class labels of ground truth bboxes. 47 | 48 | Returns: 49 | :obj:`SamplingResult`: Sampling result. 50 | """ 51 | bboxes = bboxes[:, :4] 52 | 53 | gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) 54 | if self.add_gt_as_proposals: 55 | bboxes = torch.cat([gt_bboxes, bboxes], dim=0) 56 | assign_result.add_gt_(gt_labels) 57 | gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) 58 | gt_flags = torch.cat([gt_ones, gt_flags]) 59 | 60 | num_expected_pos = int(self.num * self.pos_fraction) 61 | pos_inds = self.pos_sampler._sample_pos( 62 | assign_result, num_expected_pos, bboxes=bboxes, **kwargs) 63 | # We found that sampled indices have duplicated items occasionally. 64 | # (may be a bug of PyTorch) 65 | pos_inds = pos_inds.unique() 66 | num_sampled_pos = pos_inds.numel() 67 | num_expected_neg = self.num - num_sampled_pos 68 | if self.neg_pos_ub >= 0: 69 | _pos = max(1, num_sampled_pos) 70 | neg_upper_bound = int(self.neg_pos_ub * _pos) 71 | if num_expected_neg > neg_upper_bound: 72 | num_expected_neg = neg_upper_bound 73 | neg_inds = self.neg_sampler._sample_neg( 74 | assign_result, num_expected_neg, bboxes=bboxes, **kwargs) 75 | neg_inds = neg_inds.unique() 76 | 77 | return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 78 | assign_result, gt_flags) 79 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from ..assign_sampling import build_sampler 3 | 4 | 5 | class CombinedSampler(BaseSampler): 6 | 7 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 8 | super(CombinedSampler, self).__init__(**kwargs) 9 | 
self.pos_sampler = build_sampler(pos_sampler, **kwargs) 10 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .random_sampler import RandomSampler 5 | 6 | 7 | class InstanceBalancedPosSampler(RandomSampler): 8 | 9 | def _sample_pos(self, assign_result, num_expected, **kwargs): 10 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 11 | if pos_inds.numel() != 0: 12 | pos_inds = pos_inds.squeeze(1) 13 | if pos_inds.numel() <= num_expected: 14 | return pos_inds 15 | else: 16 | unique_gt_inds = assign_result.gt_inds[pos_inds].unique() 17 | num_gts = len(unique_gt_inds) 18 | num_per_gt = int(round(num_expected / float(num_gts)) + 1) 19 | sampled_inds = [] 20 | for i in unique_gt_inds: 21 | inds = torch.nonzero(assign_result.gt_inds == i.item()) 22 | if inds.numel() != 0: 23 | inds = inds.squeeze(1) 24 | else: 25 | continue 26 | if len(inds) > num_per_gt: 27 | inds = self.random_choice(inds, num_per_gt) 28 | sampled_inds.append(inds) 29 | sampled_inds = torch.cat(sampled_inds) 30 | if len(sampled_inds) < num_expected: 31 | num_extra = num_expected - len(sampled_inds) 32 | extra_inds = np.array( 33 | list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) 34 | if len(extra_inds) > num_extra: 35 | extra_inds = self.random_choice(extra_inds, num_extra) 36 | extra_inds = torch.from_numpy(extra_inds).to( 37 | assign_result.gt_inds.device).long() 38 | sampled_inds = torch.cat([sampled_inds, extra_inds]) 39 | elif len(sampled_inds) > num_expected: 40 | sampled_inds = self.random_choice(sampled_inds, num_expected) 41 | return sampled_inds 42 | 
-------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .random_sampler import RandomSampler 5 | 6 | 7 | class IoUBalancedNegSampler(RandomSampler): 8 | 9 | def __init__(self, 10 | num, 11 | pos_fraction, 12 | hard_thr=0.1, 13 | hard_fraction=0.5, 14 | **kwargs): 15 | super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, 16 | **kwargs) 17 | assert hard_thr > 0 18 | assert 0 < hard_fraction < 1 19 | self.hard_thr = hard_thr 20 | self.hard_fraction = hard_fraction 21 | 22 | def _sample_neg(self, assign_result, num_expected, **kwargs): 23 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 24 | if neg_inds.numel() != 0: 25 | neg_inds = neg_inds.squeeze(1) 26 | if len(neg_inds) <= num_expected: 27 | return neg_inds 28 | else: 29 | max_overlaps = assign_result.max_overlaps.cpu().numpy() 30 | # balance sampling for negative samples 31 | neg_set = set(neg_inds.cpu().numpy()) 32 | easy_set = set( 33 | np.where( 34 | np.logical_and(max_overlaps >= 0, 35 | max_overlaps < self.hard_thr))[0]) 36 | hard_set = set(np.where(max_overlaps >= self.hard_thr)[0]) 37 | easy_neg_inds = list(easy_set & neg_set) 38 | hard_neg_inds = list(hard_set & neg_set) 39 | 40 | num_expected_hard = int(num_expected * self.hard_fraction) 41 | if len(hard_neg_inds) > num_expected_hard: 42 | sampled_hard_inds = self.random_choice(hard_neg_inds, 43 | num_expected_hard) 44 | else: 45 | sampled_hard_inds = np.array(hard_neg_inds, dtype=np.int) 46 | num_expected_easy = num_expected - len(sampled_hard_inds) 47 | if len(easy_neg_inds) > num_expected_easy: 48 | sampled_easy_inds = self.random_choice(easy_neg_inds, 49 | num_expected_easy) 50 | else: 51 | sampled_easy_inds = np.array(easy_neg_inds, dtype=np.int) 52 | sampled_inds = np.concatenate((sampled_easy_inds, 53 | 
sampled_hard_inds)) 54 | if len(sampled_inds) < num_expected: 55 | num_extra = num_expected - len(sampled_inds) 56 | extra_inds = np.array(list(neg_set - set(sampled_inds))) 57 | if len(extra_inds) > num_extra: 58 | extra_inds = self.random_choice(extra_inds, num_extra) 59 | sampled_inds = np.concatenate((sampled_inds, extra_inds)) 60 | sampled_inds = torch.from_numpy(sampled_inds).long().to( 61 | assign_result.gt_inds.device) 62 | return sampled_inds 63 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/ohem_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .base_sampler import BaseSampler 4 | from ..transforms import bbox2roi 5 | 6 | 7 | class OHEMSampler(BaseSampler): 8 | 9 | def __init__(self, 10 | num, 11 | pos_fraction, 12 | context, 13 | neg_pos_ub=-1, 14 | add_gt_as_proposals=True, 15 | **kwargs): 16 | super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, 17 | add_gt_as_proposals) 18 | self.bbox_roi_extractor = context.bbox_roi_extractor 19 | self.bbox_head = context.bbox_head 20 | 21 | def hard_mining(self, inds, num_expected, bboxes, labels, feats): 22 | with torch.no_grad(): 23 | rois = bbox2roi([bboxes]) 24 | bbox_feats = self.bbox_roi_extractor( 25 | feats[:self.bbox_roi_extractor.num_inputs], rois) 26 | cls_score, _ = self.bbox_head(bbox_feats) 27 | loss = self.bbox_head.loss( 28 | cls_score=cls_score, 29 | bbox_pred=None, 30 | labels=labels, 31 | label_weights=cls_score.new_ones(cls_score.size(0)), 32 | bbox_targets=None, 33 | bbox_weights=None, 34 | reduce=False)['loss_cls'] 35 | _, topk_loss_inds = loss.topk(num_expected) 36 | return inds[topk_loss_inds] 37 | 38 | def _sample_pos(self, 39 | assign_result, 40 | num_expected, 41 | bboxes=None, 42 | feats=None, 43 | **kwargs): 44 | # Sample some hard positive samples 45 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 46 | if pos_inds.numel() != 
0: 47 | pos_inds = pos_inds.squeeze(1) 48 | if pos_inds.numel() <= num_expected: 49 | return pos_inds 50 | else: 51 | return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], 52 | assign_result.labels[pos_inds], feats) 53 | 54 | def _sample_neg(self, 55 | assign_result, 56 | num_expected, 57 | bboxes=None, 58 | feats=None, 59 | **kwargs): 60 | # Sample some hard negative samples 61 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 62 | if neg_inds.numel() != 0: 63 | neg_inds = neg_inds.squeeze(1) 64 | if len(neg_inds) <= num_expected: 65 | return neg_inds 66 | else: 67 | return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], 68 | assign_result.labels[neg_inds], feats) 69 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/pseudo_sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .base_sampler import BaseSampler 4 | from .sampling_result import SamplingResult 5 | 6 | 7 | class PseudoSampler(BaseSampler): 8 | 9 | def __init__(self, **kwargs): 10 | pass 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | 18 | def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): 19 | pos_inds = torch.nonzero( 20 | assign_result.gt_inds > 0).squeeze(-1).unique() 21 | neg_inds = torch.nonzero( 22 | assign_result.gt_inds == 0).squeeze(-1).unique() 23 | gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) 24 | sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 25 | assign_result, gt_flags) 26 | return sampling_result 27 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/random_sampler.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from 
.base_sampler import BaseSampler 5 | 6 | 7 | class RandomSampler(BaseSampler): 8 | 9 | def __init__(self, 10 | num, 11 | pos_fraction, 12 | neg_pos_ub=-1, 13 | add_gt_as_proposals=True, 14 | **kwargs): 15 | super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, 16 | add_gt_as_proposals) 17 | 18 | @staticmethod 19 | def random_choice(gallery, num): 20 | """Random select some elements from the gallery. 21 | 22 | It seems that Pytorch's implementation is slower than numpy so we use 23 | numpy to randperm the indices. 24 | """ 25 | assert len(gallery) >= num 26 | if isinstance(gallery, list): 27 | gallery = np.array(gallery) 28 | cands = np.arange(len(gallery)) 29 | np.random.shuffle(cands) 30 | rand_inds = cands[:num] 31 | if not isinstance(gallery, np.ndarray): 32 | rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) 33 | return gallery[rand_inds] 34 | 35 | def _sample_pos(self, assign_result, num_expected, **kwargs): 36 | """Randomly sample some positive samples.""" 37 | pos_inds = torch.nonzero(assign_result.gt_inds > 0) 38 | if pos_inds.numel() != 0: 39 | pos_inds = pos_inds.squeeze(1) 40 | if pos_inds.numel() <= num_expected: 41 | return pos_inds 42 | else: 43 | return self.random_choice(pos_inds, num_expected) 44 | 45 | def _sample_neg(self, assign_result, num_expected, **kwargs): 46 | """Randomly sample some negative samples.""" 47 | neg_inds = torch.nonzero(assign_result.gt_inds == 0) 48 | if neg_inds.numel() != 0: 49 | neg_inds = neg_inds.squeeze(1) 50 | if len(neg_inds) <= num_expected: 51 | return neg_inds 52 | else: 53 | return self.random_choice(neg_inds, num_expected) 54 | -------------------------------------------------------------------------------- /detection/mmdet/core/bbox/samplers/sampling_result.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class SamplingResult(object): 5 | 6 | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, 7 | 
gt_flags): 8 | self.pos_inds = pos_inds 9 | self.neg_inds = neg_inds 10 | self.pos_bboxes = bboxes[pos_inds] 11 | self.neg_bboxes = bboxes[neg_inds] 12 | self.pos_is_gt = gt_flags[pos_inds] 13 | 14 | self.num_gts = gt_bboxes.shape[0] 15 | self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 16 | self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] 17 | if assign_result.labels is not None: 18 | self.pos_gt_labels = assign_result.labels[pos_inds] 19 | else: 20 | self.pos_gt_labels = None 21 | 22 | @property 23 | def bboxes(self): 24 | return torch.cat([self.pos_bboxes, self.neg_bboxes]) 25 | -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (voc_classes, imagenet_det_classes, 2 | imagenet_vid_classes, coco_classes, dataset_aliases, 3 | get_classes) 4 | from .coco_utils import coco_eval, fast_eval_recall, results2json 5 | from .eval_hooks import (DistEvalHook, DistEvalmAPHook, CocoDistEvalRecallHook, 6 | CocoDistEvalmAPHook) 7 | from .mean_ap import average_precision, eval_map, print_map_summary 8 | from .recall import (eval_recalls, print_recall_summary, plot_num_recall, 9 | plot_iou_recall) 10 | 11 | __all__ = [ 12 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 13 | 'coco_classes', 'dataset_aliases', 'get_classes', 'coco_eval', 14 | 'fast_eval_recall', 'results2json', 'DistEvalHook', 'DistEvalmAPHook', 15 | 'CocoDistEvalRecallHook', 'CocoDistEvalmAPHook', 'average_precision', 16 | 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 17 | 'plot_num_recall', 'plot_iou_recall' 18 | ] 19 | -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/class_names.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/class_names.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/coco_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/coco_utils.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/mean_ap.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/mean_ap.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/__pycache__/recall.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/evaluation/__pycache__/recall.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/evaluation/bbox_overlaps.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def bbox_overlaps(bboxes1, bboxes2, mode='iou'): 5 | """Calculate the ious between each bbox of bboxes1 and bboxes2. 
6 | 7 | Args: 8 | bboxes1(ndarray): shape (n, 4) 9 | bboxes2(ndarray): shape (k, 4) 10 | mode(str): iou (intersection over union) or iof (intersection 11 | over foreground) 12 | 13 | Returns: 14 | ious(ndarray): shape (n, k) 15 | """ 16 | 17 | assert mode in ['iou', 'iof'] 18 | 19 | bboxes1 = bboxes1.astype(np.float32) 20 | bboxes2 = bboxes2.astype(np.float32) 21 | rows = bboxes1.shape[0] 22 | cols = bboxes2.shape[0] 23 | ious = np.zeros((rows, cols), dtype=np.float32) 24 | if rows * cols == 0: 25 | return ious 26 | exchange = False 27 | if bboxes1.shape[0] > bboxes2.shape[0]: 28 | bboxes1, bboxes2 = bboxes2, bboxes1 29 | ious = np.zeros((cols, rows), dtype=np.float32) 30 | exchange = True 31 | area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( 32 | bboxes1[:, 3] - bboxes1[:, 1] + 1) 33 | area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( 34 | bboxes2[:, 3] - bboxes2[:, 1] + 1) 35 | for i in range(bboxes1.shape[0]): 36 | x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) 37 | y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) 38 | x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) 39 | y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) 40 | overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( 41 | y_end - y_start + 1, 0) 42 | if mode == 'iou': 43 | union = area1[i] + area2 - overlap 44 | else: 45 | union = area1[i] if not exchange else area2 46 | ious[i, :] = overlap / union 47 | if exchange: 48 | ious = ious.T 49 | return ious 50 | -------------------------------------------------------------------------------- /detection/mmdet/core/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .losses import (weighted_nll_loss, weighted_cross_entropy, 2 | weighted_binary_cross_entropy, sigmoid_focal_loss, 3 | weighted_sigmoid_focal_loss, mask_cross_entropy, 4 | smooth_l1_loss, weighted_smoothl1, accuracy) 5 | 6 | __all__ = [ 7 | 'weighted_nll_loss', 'weighted_cross_entropy', 8 | 'weighted_binary_cross_entropy', 
'sigmoid_focal_loss', 9 | 'weighted_sigmoid_focal_loss', 'mask_cross_entropy', 'smooth_l1_loss', 10 | 'weighted_smoothl1', 'accuracy' 11 | ] 12 | -------------------------------------------------------------------------------- /detection/mmdet/core/loss/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/loss/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/loss/__pycache__/losses.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/loss/__pycache__/losses.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import split_combined_polys 2 | from .mask_target import mask_target 3 | 4 | __all__ = ['split_combined_polys', 'mask_target'] 5 | -------------------------------------------------------------------------------- /detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/mask/mask_target.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import mmcv 4 | 5 | 6 | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, 7 | cfg): 8 | cfg_list = [cfg for _ in range(len(pos_proposals_list))] 9 | mask_targets = map(mask_target_single, pos_proposals_list, 10 | pos_assigned_gt_inds_list, gt_masks_list, cfg_list) 11 | mask_targets = torch.cat(list(mask_targets)) 12 | return mask_targets 13 | 14 | 15 | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): 16 | mask_size = cfg.mask_size 17 | num_pos = pos_proposals.size(0) 18 | mask_targets = [] 19 | if num_pos > 0: 20 | proposals_np = pos_proposals.cpu().numpy() 21 | pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() 22 | for i in range(num_pos): 23 | gt_mask = gt_masks[pos_assigned_gt_inds[i]] 24 | bbox = proposals_np[i, :].astype(np.int32) 25 | x1, y1, x2, y2 = bbox 26 | w = np.maximum(x2 - x1 + 1, 1) 27 | h = np.maximum(y2 - y1 + 1, 1) 28 | # mask is uint8 both before and after resizing 29 | target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w], 30 | (mask_size, mask_size)) 31 | mask_targets.append(target) 32 | mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to( 33 | pos_proposals.device) 34 | else: 35 | mask_targets 
= pos_proposals.new_zeros((0, mask_size, mask_size)) 36 | return mask_targets 37 | -------------------------------------------------------------------------------- /detection/mmdet/core/mask/utils.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | 4 | def split_combined_polys(polys, poly_lens, polys_per_mask): 5 | """Split the combined 1-D polys into masks. 6 | 7 | A mask is represented as a list of polys, and a poly is represented as 8 | a 1-D array. In dataset, all masks are concatenated into a single 1-D 9 | tensor. Here we need to split the tensor into original representations. 10 | 11 | Args: 12 | polys (list): a list (length = image num) of 1-D tensors 13 | poly_lens (list): a list (length = image num) of poly length 14 | polys_per_mask (list): a list (length = image num) of poly number 15 | of each mask 16 | 17 | Returns: 18 | list: a list (length = image num) of list (length = mask num) of 19 | list (length = poly num) of numpy array 20 | """ 21 | mask_polys_list = [] 22 | for img_id in range(len(polys)): 23 | polys_single = polys[img_id] 24 | polys_lens_single = poly_lens[img_id].tolist() 25 | polys_per_mask_single = polys_per_mask[img_id].tolist() 26 | 27 | split_polys = mmcv.slice_list(polys_single, polys_lens_single) 28 | mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) 29 | mask_polys_list.append(mask_polys) 30 | return mask_polys_list 31 | -------------------------------------------------------------------------------- /detection/mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import multiclass_nms 2 | from .merge_augs import (merge_aug_proposals, merge_aug_bboxes, 3 | merge_aug_scores, merge_aug_masks) 4 | 5 | __all__ = [ 6 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 7 | 'merge_aug_scores', 'merge_aug_masks' 8 | ] 9 | 
-------------------------------------------------------------------------------- /detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/post_processing/bbox_nms.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from mmdet.ops.nms import nms_wrapper 4 | 5 | 6 | def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1): 7 | """NMS for multi-class bboxes. 8 | 9 | Args: 10 | multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) 11 | multi_scores (Tensor): shape (n, #class) 12 | score_thr (float): bbox threshold, bboxes with scores lower than it 13 | will not be considered. 14 | nms_thr (float): NMS IoU threshold 15 | max_num (int): if there are more than max_num bboxes after NMS, 16 | only top max_num will be kept. 
17 | 18 | Returns: 19 | tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels 20 | are 0-based. 21 | """ 22 | num_classes = multi_scores.shape[1] 23 | bboxes, labels = [], [] 24 | nms_cfg_ = nms_cfg.copy() 25 | nms_type = nms_cfg_.pop('type', 'nms') 26 | nms_op = getattr(nms_wrapper, nms_type) 27 | for i in range(1, num_classes): 28 | cls_inds = multi_scores[:, i] > score_thr 29 | if not cls_inds.any(): 30 | continue 31 | # get bboxes and scores of this class 32 | if multi_bboxes.shape[1] == 4: 33 | _bboxes = multi_bboxes[cls_inds, :] 34 | else: 35 | _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4] 36 | _scores = multi_scores[cls_inds, i] 37 | cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1) 38 | cls_dets, _ = nms_op(cls_dets, **nms_cfg_) 39 | cls_labels = multi_bboxes.new_full( 40 | (cls_dets.shape[0], ), i - 1, dtype=torch.long) 41 | bboxes.append(cls_dets) 42 | labels.append(cls_labels) 43 | if bboxes: 44 | bboxes = torch.cat(bboxes) 45 | labels = torch.cat(labels) 46 | if bboxes.shape[0] > max_num: 47 | _, inds = bboxes[:, -1].sort(descending=True) 48 | inds = inds[:max_num] 49 | bboxes = bboxes[inds] 50 | labels = labels[inds] 51 | else: 52 | bboxes = multi_bboxes.new_zeros((0, 5)) 53 | labels = multi_bboxes.new_zeros((0, ), dtype=torch.long) 54 | 55 | return bboxes, labels 56 | -------------------------------------------------------------------------------- /detection/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import allreduce_grads, DistOptimizerHook 2 | from .misc import tensor2imgs, unmap, multi_apply 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap', 6 | 'multi_apply' 7 | ] 8 | -------------------------------------------------------------------------------- /detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/core/utils/dist_utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch.distributed as dist 4 | from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors, 5 | _take_tensors) 6 | from mmcv.runner import OptimizerHook 7 | 8 | 9 | def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): 10 | if bucket_size_mb > 0: 11 | bucket_size_bytes = bucket_size_mb * 1024 * 1024 12 | buckets = _take_tensors(tensors, bucket_size_bytes) 13 | else: 14 | buckets = OrderedDict() 15 | for tensor in tensors: 16 | tp = tensor.type() 17 | if tp not in buckets: 18 | buckets[tp] = [] 19 | buckets[tp].append(tensor) 20 | buckets = buckets.values() 21 | 22 | for bucket in buckets: 23 | flat_tensors = _flatten_dense_tensors(bucket) 24 | dist.all_reduce(flat_tensors) 25 | flat_tensors.div_(world_size) 26 | for tensor, synced in zip( 27 | bucket, 
_unflatten_dense_tensors(flat_tensors, bucket)): 28 | tensor.copy_(synced) 29 | 30 | 31 | def allreduce_grads(model, coalesce=True, bucket_size_mb=-1): 32 | grads = [ 33 | param.grad.data for param in model.parameters() 34 | if param.requires_grad and param.grad is not None 35 | ] 36 | world_size = dist.get_world_size() 37 | if coalesce: 38 | _allreduce_coalesced(grads, world_size, bucket_size_mb) 39 | else: 40 | for tensor in grads: 41 | dist.all_reduce(tensor.div_(world_size)) 42 | 43 | 44 | class DistOptimizerHook(OptimizerHook): 45 | 46 | def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): 47 | self.grad_clip = grad_clip 48 | self.coalesce = coalesce 49 | self.bucket_size_mb = bucket_size_mb 50 | 51 | def after_train_iter(self, runner): 52 | runner.optimizer.zero_grad() 53 | runner.outputs['loss'].backward() 54 | allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb) 55 | if self.grad_clip is not None: 56 | self.clip_grads(runner.model.parameters()) 57 | runner.optimizer.step() 58 | -------------------------------------------------------------------------------- /detection/mmdet/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | import mmcv 4 | import numpy as np 5 | from six.moves import map, zip 6 | 7 | 8 | def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): 9 | num_imgs = tensor.size(0) 10 | mean = np.array(mean, dtype=np.float32) 11 | std = np.array(std, dtype=np.float32) 12 | imgs = [] 13 | for img_id in range(num_imgs): 14 | img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) 15 | img = mmcv.imdenormalize( 16 | img, mean, std, to_bgr=to_rgb).astype(np.uint8) 17 | imgs.append(np.ascontiguousarray(img)) 18 | return imgs 19 | 20 | 21 | def multi_apply(func, *args, **kwargs): 22 | pfunc = partial(func, **kwargs) if kwargs else func 23 | map_results = map(pfunc, *args) 24 | return tuple(map(list, zip(*map_results))) 
def unmap(data, count, inds, fill=0):
    """Scatter a subset of items (``data``) back into a tensor with
    ``count`` rows, writing ``fill`` everywhere else.

    ``inds`` gives the destination row of each item in ``data``.
    """
    if data.dim() == 1:
        out = data.new_full((count, ), fill)
        out[inds] = data
        return out
    # Multi-dimensional: keep all trailing dimensions of ``data``.
    out = data.new_full((count, ) + data.size()[1:], fill)
    out[inds, :] = data
    return out
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/datasets/__pycache__/coco.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/datasets/__pycache__/concat_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/datasets/__pycache__/concat_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/datasets/__pycache__/custom.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/datasets/__pycache__/custom.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/datasets/__pycache__/extra_aug.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/datasets/__pycache__/extra_aug.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/datasets/__pycache__/repeat_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/datasets/__pycache__/repeat_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/datasets/__pycache__/transforms.cpython-36.pyc: -------------------------------------------------------------------------------- 
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset


class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also
    concatenates the per-image group ``flag`` (used for aspect-ratio
    grouping) when the underlying datasets provide one.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # Class names are taken from the first dataset; all datasets are
        # assumed to share the same label space.
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
from functools import partial

from mmcv.runner import get_dist_info
from mmcv.parallel import collate
from torch.utils.data import DataLoader

from .sampler import GroupSampler, DistributedGroupSampler

# https://github.com/pytorch/pytorch/issues/973
# Raise the soft limit on open file descriptors: every dataloader worker
# keeps several fds open and the default soft limit is easily exhausted.
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))


def build_dataloader(dataset,
                     imgs_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     **kwargs):
    """Build a PyTorch ``DataLoader`` with (distributed) group sampling.

    Args:
        dataset: Dataset to load; may expose a group ``flag`` attribute
            consumed by the group samplers.
        imgs_per_gpu (int): Batch size on each GPU.
        workers_per_gpu (int): Worker processes per GPU.
        num_gpus (int): Number of GPUs (non-distributed mode only).
        dist (bool): Whether to use a distributed sampler.
        **kwargs: Extra arguments forwarded to ``DataLoader``; ``shuffle``
            is intercepted here (see below).

    Returns:
        DataLoader: The configured data loader.
    """
    # Fix: ``shuffle`` must not reach DataLoader together with a sampler
    # (DataLoader raises ValueError for sampler + shuffle=True). Pop it and
    # let the group samplers, which shuffle internally, take over.
    shuffle = kwargs.pop('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        sampler = DistributedGroupSampler(dataset, imgs_per_gpu, world_size,
                                          rank)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        # shuffling is delegated to the sampler whenever one is used
        shuffle=(shuffle if sampler is None else False),
        **kwargs)

    return data_loader
from .xml_style import XMLDataset


class VOCDataset(XMLDataset):
    """Pascal VOC detection dataset (2007/2012) with XML-style annotations."""

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # Infer the dataset year from the image prefix path.
        for token, year in (('VOC2007', 2007), ('VOC2012', 2012)):
            if token in self.img_prefix:
                self.year = year
                break
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')
img_infos.append( 29 | dict(id=img_id, filename=filename, width=width, height=height)) 30 | return img_infos 31 | 32 | def get_ann_info(self, idx): 33 | img_id = self.img_infos[idx]['id'] 34 | xml_path = osp.join(self.img_prefix, 'Annotations', 35 | '{}.xml'.format(img_id)) 36 | tree = ET.parse(xml_path) 37 | root = tree.getroot() 38 | bboxes = [] 39 | labels = [] 40 | bboxes_ignore = [] 41 | labels_ignore = [] 42 | for obj in root.findall('object'): 43 | name = obj.find('name').text 44 | label = self.cat2label[name] 45 | difficult = int(obj.find('difficult').text) 46 | bnd_box = obj.find('bndbox') 47 | bbox = [ 48 | int(bnd_box.find('xmin').text), 49 | int(bnd_box.find('ymin').text), 50 | int(bnd_box.find('xmax').text), 51 | int(bnd_box.find('ymax').text) 52 | ] 53 | if difficult: 54 | bboxes_ignore.append(bbox) 55 | labels_ignore.append(label) 56 | else: 57 | bboxes.append(bbox) 58 | labels.append(label) 59 | if not bboxes: 60 | bboxes = np.zeros((0, 4)) 61 | labels = np.zeros((0, )) 62 | else: 63 | bboxes = np.array(bboxes, ndmin=2) - 1 64 | labels = np.array(labels) 65 | if not bboxes_ignore: 66 | bboxes_ignore = np.zeros((0, 4)) 67 | labels_ignore = np.zeros((0, )) 68 | else: 69 | bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 70 | labels_ignore = np.array(labels_ignore) 71 | ann = dict( 72 | bboxes=bboxes.astype(np.float32), 73 | labels=labels.astype(np.int64), 74 | bboxes_ignore=bboxes_ignore.astype(np.float32), 75 | labels_ignore=labels_ignore.astype(np.int64)) 76 | return ann 77 | -------------------------------------------------------------------------------- /detection/mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .necks import * # noqa: F401,F403 3 | from .roi_extractors import * # noqa: F401,F403 4 | from .anchor_heads import * # noqa: F401,F403 5 | from .bbox_heads import * # noqa: F401,F403 6 | from .mask_heads import * # noqa: 
F401,F403 7 | from .detectors import * # noqa: F401,F403 8 | from .registry import BACKBONES, NECKS, ROI_EXTRACTORS, HEADS, DETECTORS 9 | from .builder import (build_backbone, build_neck, build_roi_extractor, 10 | build_head, build_detector) 11 | 12 | __all__ = [ 13 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'HEADS', 'DETECTORS', 14 | 'build_backbone', 'build_neck', 'build_roi_extractor', 'build_head', 15 | 'build_detector' 16 | ] 17 | -------------------------------------------------------------------------------- /detection/mmdet/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/__pycache__/builder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/__pycache__/builder.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/__pycache__/registry.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/__pycache__/registry.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_head import AnchorHead 2 | from .rpn_head import RPNHead 3 | from .retina_head import RetinaHead 4 | from .ssd_head import SSDHead 5 | 6 | __all__ = ['AnchorHead', 'RPNHead', 'RetinaHead', 
'SSDHead'] 7 | -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__pycache__/anchor_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/anchor_heads/__pycache__/anchor_head.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-36.pyc: -------------------------------------------------------------------------------- 
import numpy as np
import torch.nn as nn
from mmcv.cnn import normal_init

from .anchor_head import AnchorHead
from ..registry import HEADS
from ..utils import bias_init_with_prob


@HEADS.register_module
class RetinaHead(AnchorHead):
    """RetinaNet head: stacked conv towers for classification and box
    regression, with sigmoid classification and focal loss enabled."""

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 octave_base_scale=4,
                 scales_per_octave=3,
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        # Anchor scales: base_scale * 2**(i / scales_per_octave).
        anchor_scales = octave_base_scale * np.array(
            [2**(i / scales_per_octave) for i in range(scales_per_octave)])
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_scales=anchor_scales,
            use_sigmoid_cls=True,
            use_focal_loss=True,
            **kwargs)

    def _init_layers(self):
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            # First conv adapts from the FPN channels; the rest are C->C.
            in_chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                nn.Conv2d(in_chn, self.feat_channels, 3, stride=1, padding=1))
            self.reg_convs.append(
                nn.Conv2d(in_chn, self.feat_channels, 3, stride=1, padding=1))
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        for conv in self.cls_convs:
            normal_init(conv, std=0.01)
        for conv in self.reg_convs:
            normal_init(conv, std=0.01)
        # Bias so that initial foreground probability is ~0.01 (focal loss).
        normal_init(self.retina_cls, std=0.01, bias=bias_init_with_prob(0.01))
        normal_init(self.retina_reg, std=0.01)

    def forward_single(self, x):
        """Run both towers on one feature level; return (cls_score, bbox)."""
        cls_feat, reg_feat = x, x
        for conv in self.cls_convs:
            cls_feat = self.relu(conv(cls_feat))
        for conv in self.reg_convs:
            reg_feat = self.relu(conv(reg_feat))
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
import torch
import math
import torch.nn as nn
import torch.nn.functional as F


class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""

    def forward(self, x):
        return x.view(x.size(0), -1)


class ChannelGate(nn.Module):
    """Channel attention branch of BAM.

    Global average pooling followed by a bottleneck MLP that produces one
    gate value per channel; the output is broadcast to the input's shape.
    Submodule names are kept unchanged for checkpoint compatibility.
    """

    def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
        super(ChannelGate, self).__init__()
        self.gate_c = nn.Sequential()
        self.gate_c.add_module('flatten', Flatten())
        # Bottleneck widths: C -> C/r (num_layers times) -> C.
        gate_channels = [gate_channel]
        gate_channels += [gate_channel // reduction_ratio] * num_layers
        gate_channels += [gate_channel]
        for i in range(len(gate_channels) - 2):
            self.gate_c.add_module(
                'gate_c_fc_%d' % i,
                nn.Linear(gate_channels[i], gate_channels[i + 1]))
            self.gate_c.add_module('gate_c_bn_%d' % (i + 1),
                                   nn.BatchNorm1d(gate_channels[i + 1]))
            self.gate_c.add_module('gate_c_relu_%d' % (i + 1), nn.ReLU())
        self.gate_c.add_module(
            'gate_c_fc_final', nn.Linear(gate_channels[-2],
                                         gate_channels[-1]))
        self.pool = nn.AdaptiveAvgPool2d(1)

    def forward(self, in_tensor):
        avg_pool = self.pool(in_tensor)
        # Broadcast the per-channel gate back to the spatial size.
        return self.gate_c(avg_pool).unsqueeze(2).unsqueeze(3).expand_as(
            in_tensor)


class SpatialGate(nn.Module):
    """Spatial attention branch of BAM.

    A 1x1 channel reduction followed by dilated 3x3 convolutions that
    produce a single-channel spatial gate, broadcast across channels.
    """

    def __init__(self,
                 gate_channel,
                 reduction_ratio=16,
                 dilation_conv_num=2,
                 dilation_val=4):
        super(SpatialGate, self).__init__()
        self.gate_s = nn.Sequential()
        self.gate_s.add_module(
            'gate_s_conv_reduce0',
            nn.Conv2d(gate_channel, gate_channel // reduction_ratio,
                      kernel_size=1))
        self.gate_s.add_module(
            'gate_s_bn_reduce0',
            nn.BatchNorm2d(gate_channel // reduction_ratio))
        self.gate_s.add_module('gate_s_relu_reduce0', nn.ReLU())
        # Dilated convs enlarge the receptive field at constant resolution.
        for i in range(dilation_conv_num):
            self.gate_s.add_module(
                'gate_s_conv_di_%d' % i,
                nn.Conv2d(gate_channel // reduction_ratio,
                          gate_channel // reduction_ratio,
                          kernel_size=3,
                          padding=dilation_val,
                          dilation=dilation_val))
            self.gate_s.add_module(
                'gate_s_bn_di_%d' % i,
                nn.BatchNorm2d(gate_channel // reduction_ratio))
            self.gate_s.add_module('gate_s_relu_di_%d' % i, nn.ReLU())
        self.gate_s.add_module(
            'gate_s_conv_final',
            nn.Conv2d(gate_channel // reduction_ratio, 1, kernel_size=1))

    def forward(self, in_tensor):
        return self.gate_s(in_tensor).expand_as(in_tensor)


class BAM(nn.Module):
    """Bottleneck Attention Module.

    Output is ``(1 + sigmoid(channel_gate * spatial_gate)) * input``, so
    the module is a residual-style attention that can only amplify.
    """

    def __init__(self, gate_channel):
        super(BAM, self).__init__()
        self.channel_att = ChannelGate(gate_channel)
        self.spatial_att = SpatialGate(gate_channel)

    def forward(self, in_tensor):
        ca = self.channel_att(in_tensor)
        sa = self.spatial_att(in_tensor)
        # Fix: torch.sigmoid replaces the deprecated F.sigmoid (removed in
        # newer PyTorch releases); numerics are identical.
        att = 1 + torch.sigmoid(ca * sa)
        return att * in_tensor
import mmcv
from torch import nn

from .registry import BACKBONES, NECKS, ROI_EXTRACTORS, HEADS, DETECTORS


def _build_module(cfg, registry, default_args):
    """Instantiate one module described by ``cfg`` from ``registry``.

    ``cfg['type']`` may be a registered name or a class object; remaining
    keys become constructor arguments, with ``default_args`` filling gaps.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    assert isinstance(default_args, dict) or default_args is None
    params = cfg.copy()
    obj_type = params.pop('type')
    if mmcv.is_str(obj_type):
        if obj_type not in registry.module_dict:
            raise KeyError('{} is not in the {} registry'.format(
                obj_type, registry.name))
        obj_type = registry.module_dict[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(
            type(obj_type)))
    if default_args is not None:
        for key, value in default_args.items():
            params.setdefault(key, value)
    return obj_type(**params)


def build(cfg, registry, default_args=None):
    """Build one module, or an ``nn.Sequential`` when ``cfg`` is a list."""
    if isinstance(cfg, list):
        return nn.Sequential(
            *(_build_module(item, registry, default_args) for item in cfg))
    return _build_module(cfg, registry, default_args)


def build_backbone(cfg):
    """Build a backbone network from its config."""
    return build(cfg, BACKBONES)


def build_neck(cfg):
    """Build a neck (e.g. FPN) from its config."""
    return build(cfg, NECKS)


def build_roi_extractor(cfg):
    """Build a RoI feature extractor from its config."""
    return build(cfg, ROI_EXTRACTORS)


def build_head(cfg):
    """Build a head module from its config."""
    return build(cfg, HEADS)


def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a full detector, injecting train/test configs as defaults."""
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
CascadeRCNN 9 | from .retinanet import RetinaNet 10 | 11 | __all__ = [ 12 | 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', 13 | 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'RetinaNet' 14 | ] 15 | -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/base.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/base.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/faster_rcnn.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/faster_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/retinanet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/retinanet.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/rpn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/rpn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/single_stage.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/single_stage.cpython-36.pyc -------------------------------------------------------------------------------- 
/detection/mmdet/models/detectors/__pycache__/test_mixins.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/test_mixins.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/__pycache__/two_stage.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/detectors/__pycache__/two_stage.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/fast_rcnn.py: -------------------------------------------------------------------------------- 1 | from .two_stage import TwoStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class FastRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_roi_extractor, 12 | bbox_head, 13 | train_cfg, 14 | test_cfg, 15 | mask_roi_extractor=None, 16 | mask_head=None, 17 | pretrained=None): 18 | super(FastRCNN, self).__init__( 19 | backbone=backbone, 20 | neck=neck, 21 | bbox_roi_extractor=bbox_roi_extractor, 22 | bbox_head=bbox_head, 23 | train_cfg=train_cfg, 24 | test_cfg=test_cfg, 25 | mask_roi_extractor=mask_roi_extractor, 26 | mask_head=mask_head, 27 | pretrained=pretrained) 28 | 29 | def forward_test(self, imgs, img_metas, proposals, **kwargs): 30 | for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: 31 | if not isinstance(var, list): 32 | raise TypeError('{} must be a list, but got {}'.format( 33 | name, type(var))) 34 | 35 | num_augs = len(imgs) 36 | if num_augs != len(img_metas): 37 | raise ValueError( 38 | 'num of augmentations ({}) != num of image meta ({})'.format( 
39 | len(imgs), len(img_metas))) 40 | # TODO: remove the restriction of imgs_per_gpu == 1 when prepared 41 | imgs_per_gpu = imgs[0].size(0) 42 | assert imgs_per_gpu == 1 43 | 44 | if num_augs == 1: 45 | return self.simple_test(imgs[0], img_metas[0], proposals[0], 46 | **kwargs) 47 | else: 48 | return self.aug_test(imgs, img_metas, proposals, **kwargs) 49 | -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from .two_stage import TwoStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class FasterRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | rpn_head, 12 | bbox_roi_extractor, 13 | bbox_head, 14 | train_cfg, 15 | test_cfg, 16 | pretrained=None): 17 | super(FasterRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | bbox_roi_extractor=bbox_roi_extractor, 22 | bbox_head=bbox_head, 23 | train_cfg=train_cfg, 24 | test_cfg=test_cfg, 25 | pretrained=pretrained) 26 | -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | from .two_stage import TwoStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class MaskRCNN(TwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | rpn_head, 12 | bbox_roi_extractor, 13 | bbox_head, 14 | mask_roi_extractor, 15 | mask_head, 16 | train_cfg, 17 | test_cfg, 18 | pretrained=None): 19 | super(MaskRCNN, self).__init__( 20 | backbone=backbone, 21 | neck=neck, 22 | rpn_head=rpn_head, 23 | bbox_roi_extractor=bbox_roi_extractor, 24 | bbox_head=bbox_head, 25 | mask_roi_extractor=mask_roi_extractor, 26 | mask_head=mask_head, 27 | train_cfg=train_cfg, 28 | 
test_cfg=test_cfg, 29 | pretrained=pretrained) 30 | -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/retinanet.py: -------------------------------------------------------------------------------- 1 | from .single_stage import SingleStageDetector 2 | from ..registry import DETECTORS 3 | 4 | 5 | @DETECTORS.register_module 6 | class RetinaNet(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /detection/mmdet/models/detectors/single_stage.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .base import BaseDetector 4 | from .. import builder 5 | from ..registry import DETECTORS 6 | from mmdet.core import bbox2result 7 | 8 | 9 | @DETECTORS.register_module 10 | class SingleStageDetector(BaseDetector): 11 | 12 | def __init__(self, 13 | backbone, 14 | neck=None, 15 | bbox_head=None, 16 | train_cfg=None, 17 | test_cfg=None, 18 | pretrained=None): 19 | super(SingleStageDetector, self).__init__() 20 | self.backbone = builder.build_backbone(backbone) 21 | if neck is not None: 22 | self.neck = builder.build_neck(neck) 23 | self.bbox_head = builder.build_head(bbox_head) 24 | self.train_cfg = train_cfg 25 | self.test_cfg = test_cfg 26 | self.init_weights(pretrained=pretrained) 27 | 28 | def init_weights(self, pretrained=None): 29 | super(SingleStageDetector, self).init_weights(pretrained) 30 | self.backbone.init_weights(pretrained=pretrained) 31 | if self.with_neck: 32 | if isinstance(self.neck, nn.Sequential): 33 | for m in self.neck: 34 | m.init_weights() 35 | else: 36 | self.neck.init_weights() 37 | self.bbox_head.init_weights() 38 | 39 | def 
extract_feat(self, img): 40 | x = self.backbone(img) 41 | if self.with_neck: 42 | x = self.neck(x) 43 | return x 44 | 45 | def forward_train(self, 46 | img, 47 | img_metas, 48 | gt_bboxes, 49 | gt_labels, 50 | gt_bboxes_ignore=None): 51 | x = self.extract_feat(img) 52 | outs = self.bbox_head(x) 53 | loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg) 54 | losses = self.bbox_head.loss( 55 | *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) 56 | return losses 57 | 58 | def simple_test(self, img, img_meta, rescale=False): 59 | x = self.extract_feat(img) 60 | outs = self.bbox_head(x) 61 | bbox_inputs = outs + (img_meta, self.test_cfg, rescale) 62 | bbox_list = self.bbox_head.get_bboxes(*bbox_inputs) 63 | bbox_results = [ 64 | bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) 65 | for det_bboxes, det_labels in bbox_list 66 | ] 67 | return bbox_results[0] 68 | 69 | def aug_test(self, imgs, img_metas, rescale=False): 70 | raise NotImplementedError 71 | -------------------------------------------------------------------------------- /detection/mmdet/models/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .fcn_mask_head import FCNMaskHead 2 | 3 | __all__ = ['FCNMaskHead'] 4 | -------------------------------------------------------------------------------- /detection/mmdet/models/mask_heads/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/mask_heads/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/mask_heads/__pycache__/fcn_mask_head.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/mask_heads/__pycache__/fcn_mask_head.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .fpn import FPN 2 | from .fpn_sge import FPNSGE 3 | 4 | __all__ = ['FPN', 'FPNSGE'] 5 | -------------------------------------------------------------------------------- /detection/mmdet/models/necks/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/necks/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/necks/__pycache__/fpn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/necks/__pycache__/fpn.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/registry.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class Registry(object): 5 | 6 | def __init__(self, name): 7 | self._name = name 8 | self._module_dict = dict() 9 | 10 | @property 11 | def name(self): 12 | return self._name 13 | 14 | @property 15 | def module_dict(self): 16 | return self._module_dict 17 | 18 | def _register_module(self, module_class): 19 | """Register a module. 20 | 21 | Args: 22 | module (:obj:`nn.Module`): Module to be registered. 
23 | """ 24 | if not issubclass(module_class, nn.Module): 25 | raise TypeError( 26 | 'module must be a child of nn.Module, but got {}'.format( 27 | module_class)) 28 | module_name = module_class.__name__ 29 | if module_name in self._module_dict: 30 | raise KeyError('{} is already registered in {}'.format( 31 | module_name, self.name)) 32 | self._module_dict[module_name] = module_class 33 | 34 | def register_module(self, cls): 35 | self._register_module(cls) 36 | return cls 37 | 38 | 39 | BACKBONES = Registry('backbone') 40 | NECKS = Registry('neck') 41 | ROI_EXTRACTORS = Registry('roi_extractor') 42 | HEADS = Registry('head') 43 | DETECTORS = Registry('detector') 44 | -------------------------------------------------------------------------------- /detection/mmdet/models/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .single_level import SingleRoIExtractor 2 | 3 | __all__ = ['SingleRoIExtractor'] 4 | -------------------------------------------------------------------------------- /detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/roi_extractors/single_level.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from mmdet import ops 7 | from ..registry import ROI_EXTRACTORS 8 | 9 | 10 | @ROI_EXTRACTORS.register_module 11 | class SingleRoIExtractor(nn.Module): 12 | """Extract RoI features from a single level feature map. 13 | 14 | If there are mulitple input feature levels, each RoI is mapped to a level 15 | according to its scale. 16 | 17 | Args: 18 | roi_layer (dict): Specify RoI layer type and arguments. 19 | out_channels (int): Output channels of RoI layers. 20 | featmap_strides (int): Strides of input feature maps. 21 | finest_scale (int): Scale threshold of mapping to level 0. 22 | """ 23 | 24 | def __init__(self, 25 | roi_layer, 26 | out_channels, 27 | featmap_strides, 28 | finest_scale=56): 29 | super(SingleRoIExtractor, self).__init__() 30 | self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) 31 | self.out_channels = out_channels 32 | self.featmap_strides = featmap_strides 33 | self.finest_scale = finest_scale 34 | 35 | @property 36 | def num_inputs(self): 37 | """int: Input feature map levels.""" 38 | return len(self.featmap_strides) 39 | 40 | def init_weights(self): 41 | pass 42 | 43 | def build_roi_layers(self, layer_cfg, featmap_strides): 44 | cfg = layer_cfg.copy() 45 | layer_type = cfg.pop('type') 46 | assert hasattr(ops, layer_type) 47 | layer_cls = getattr(ops, layer_type) 48 | roi_layers = nn.ModuleList( 49 | [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) 50 | return roi_layers 51 | 52 | def map_roi_levels(self, rois, num_levels): 53 | """Map rois to corresponding feature levels by scales. 54 | 55 | - scale < finest_scale: level 0 56 | - finest_scale <= scale < finest_scale * 2: level 1 57 | - finest_scale * 2 <= scale < finest_scale * 4: level 2 58 | - scale >= finest_scale * 4: level 3 59 | 60 | Args: 61 | rois (Tensor): Input RoIs, shape (k, 5). 
62 | num_levels (int): Total level number. 63 | 64 | Returns: 65 | Tensor: Level index (0-based) of each RoI, shape (k, ) 66 | """ 67 | scale = torch.sqrt( 68 | (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1)) 69 | target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) 70 | target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() 71 | return target_lvls 72 | 73 | def forward(self, feats, rois): 74 | if len(feats) == 1: 75 | return self.roi_layers[0](feats[0], rois) 76 | 77 | out_size = self.roi_layers[0].out_size 78 | num_levels = len(feats) 79 | target_lvls = self.map_roi_levels(rois, num_levels) 80 | roi_feats = torch.cuda.FloatTensor(rois.size()[0], self.out_channels, 81 | out_size, out_size).fill_(0) 82 | for i in range(num_levels): 83 | inds = target_lvls == i 84 | if inds.any(): 85 | rois_ = rois[inds, :] 86 | roi_feats_t = self.roi_layers[i](feats[i], rois_) 87 | roi_feats[inds] += roi_feats_t 88 | return roi_feats 89 | -------------------------------------------------------------------------------- /detection/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_module import ConvModule 2 | from .norm import build_norm_layer 3 | from .weight_init import (xavier_init, normal_init, uniform_init, kaiming_init, 4 | bias_init_with_prob) 5 | 6 | __all__ = [ 7 | 'ConvModule', 'build_norm_layer', 'xavier_init', 'normal_init', 8 | 'uniform_init', 'kaiming_init', 'bias_init_with_prob' 9 | ] 10 | -------------------------------------------------------------------------------- /detection/mmdet/models/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/utils/__pycache__/__init__.cpython-36.pyc 
-------------------------------------------------------------------------------- /detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/utils/__pycache__/norm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/utils/__pycache__/norm.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/utils/__pycache__/weight_init.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/models/utils/__pycache__/weight_init.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/models/utils/conv_module.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import torch.nn as nn 4 | from mmcv.cnn import kaiming_init, constant_init 5 | 6 | from .norm import build_norm_layer 7 | 8 | 9 | class ConvModule(nn.Module): 10 | 11 | def __init__(self, 12 | in_channels, 13 | out_channels, 14 | kernel_size, 15 | stride=1, 16 | padding=0, 17 | dilation=1, 18 | groups=1, 19 | bias=True, 20 | normalize=None, 21 | activation='relu', 22 | inplace=True, 23 | activate_last=True): 24 | super(ConvModule, self).__init__() 25 | self.with_norm = normalize is not None 26 | self.with_activatation = activation is not None 27 | self.with_bias = bias 28 | 
self.activation = activation 29 | self.activate_last = activate_last 30 | 31 | if self.with_norm and self.with_bias: 32 | warnings.warn('ConvModule has norm and bias at the same time') 33 | 34 | self.conv = nn.Conv2d( 35 | in_channels, 36 | out_channels, 37 | kernel_size, 38 | stride, 39 | padding, 40 | dilation, 41 | groups, 42 | bias=bias) 43 | 44 | self.in_channels = self.conv.in_channels 45 | self.out_channels = self.conv.out_channels 46 | self.kernel_size = self.conv.kernel_size 47 | self.stride = self.conv.stride 48 | self.padding = self.conv.padding 49 | self.dilation = self.conv.dilation 50 | self.transposed = self.conv.transposed 51 | self.output_padding = self.conv.output_padding 52 | self.groups = self.conv.groups 53 | 54 | if self.with_norm: 55 | norm_channels = out_channels if self.activate_last else in_channels 56 | self.norm_name, norm = build_norm_layer(normalize, norm_channels) 57 | self.add_module(self.norm_name, norm) 58 | 59 | if self.with_activatation: 60 | assert activation in ['relu'], 'Only ReLU supported.' 
61 | if self.activation == 'relu': 62 | self.activate = nn.ReLU(inplace=inplace) 63 | 64 | # Default using msra init 65 | self.init_weights() 66 | 67 | @property 68 | def norm(self): 69 | return getattr(self, self.norm_name) 70 | 71 | def init_weights(self): 72 | nonlinearity = 'relu' if self.activation is None else self.activation 73 | kaiming_init(self.conv, nonlinearity=nonlinearity) 74 | if self.with_norm: 75 | constant_init(self.norm, 1, bias=0) 76 | 77 | def forward(self, x, activate=True, norm=True): 78 | if self.activate_last: 79 | x = self.conv(x) 80 | if norm and self.with_norm: 81 | x = self.norm(x) 82 | if activate and self.with_activatation: 83 | x = self.activate(x) 84 | else: 85 | if norm and self.with_norm: 86 | x = self.norm(x) 87 | if activate and self.with_activatation: 88 | x = self.activate(x) 89 | x = self.conv(x) 90 | return x 91 | -------------------------------------------------------------------------------- /detection/mmdet/models/utils/norm.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | norm_cfg = { 5 | # format: layer_type: (abbreviation, module) 6 | 'BN': ('bn', nn.BatchNorm2d), 7 | 'SyncBN': ('bn', None), 8 | 'GN': ('gn', nn.GroupNorm), 9 | # and potentially 'SN' 10 | } 11 | 12 | 13 | def build_norm_layer(cfg, num_features, postfix=''): 14 | """ Build normalization layer 15 | 16 | Args: 17 | cfg (dict): cfg should contain: 18 | type (str): identify norm layer type. 19 | layer args: args needed to instantiate a norm layer. 20 | frozen (bool): [optional] whether stop gradient updates 21 | of norm layer, it is helpful to set frozen mode 22 | in backbone's norms. 23 | num_features (int): number of channels from input 24 | postfix (int, str): appended into norm abbreation to 25 | create named layer. 
26 | 27 | Returns: 28 | name (str): abbreation + postfix 29 | layer (nn.Module): created norm layer 30 | """ 31 | assert isinstance(cfg, dict) and 'type' in cfg 32 | cfg_ = cfg.copy() 33 | 34 | layer_type = cfg_.pop('type') 35 | if layer_type not in norm_cfg: 36 | raise KeyError('Unrecognized norm type {}'.format(layer_type)) 37 | else: 38 | abbr, norm_layer = norm_cfg[layer_type] 39 | if norm_layer is None: 40 | raise NotImplementedError 41 | 42 | assert isinstance(postfix, (int, str)) 43 | name = abbr + str(postfix) 44 | 45 | frozen = cfg_.pop('frozen', False) 46 | cfg_.setdefault('eps', 1e-5) 47 | if layer_type != 'GN': 48 | layer = norm_layer(num_features, **cfg_) 49 | else: 50 | assert 'num_groups' in cfg_ 51 | layer = norm_layer(num_channels=num_features, **cfg_) 52 | 53 | if frozen: 54 | for param in layer.parameters(): 55 | param.requires_grad = False 56 | 57 | return name, layer 58 | -------------------------------------------------------------------------------- /detection/mmdet/models/utils/weight_init.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | 4 | 5 | def xavier_init(module, gain=1, bias=0, distribution='normal'): 6 | assert distribution in ['uniform', 'normal'] 7 | if distribution == 'uniform': 8 | nn.init.xavier_uniform_(module.weight, gain=gain) 9 | else: 10 | nn.init.xavier_normal_(module.weight, gain=gain) 11 | if hasattr(module, 'bias'): 12 | nn.init.constant_(module.bias, bias) 13 | 14 | 15 | def normal_init(module, mean=0, std=1, bias=0): 16 | nn.init.normal_(module.weight, mean, std) 17 | if hasattr(module, 'bias'): 18 | nn.init.constant_(module.bias, bias) 19 | 20 | 21 | def uniform_init(module, a=0, b=1, bias=0): 22 | nn.init.uniform_(module.weight, a, b) 23 | if hasattr(module, 'bias'): 24 | nn.init.constant_(module.bias, bias) 25 | 26 | 27 | def kaiming_init(module, 28 | mode='fan_out', 29 | nonlinearity='relu', 30 | bias=0, 31 | 
distribution='normal'): 32 | assert distribution in ['uniform', 'normal'] 33 | if distribution == 'uniform': 34 | nn.init.kaiming_uniform_( 35 | module.weight, mode=mode, nonlinearity=nonlinearity) 36 | else: 37 | nn.init.kaiming_normal_( 38 | module.weight, mode=mode, nonlinearity=nonlinearity) 39 | if hasattr(module, 'bias'): 40 | nn.init.constant_(module.bias, bias) 41 | 42 | 43 | def bias_init_with_prob(prior_prob): 44 | """ initialize conv/fc bias value according to giving probablity""" 45 | bias_init = float(-np.log((1 - prior_prob) / prior_prob)) 46 | return bias_init 47 | -------------------------------------------------------------------------------- /detection/mmdet/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .dcn import (DeformConv, DeformRoIPooling, DeformRoIPoolingPack, 2 | ModulatedDeformRoIPoolingPack, ModulatedDeformConv, 3 | ModulatedDeformConvPack, deform_conv, modulated_deform_conv, 4 | deform_roi_pooling) 5 | from .nms import nms, soft_nms 6 | from .roi_align import RoIAlign, roi_align 7 | from .roi_pool import RoIPool, roi_pool 8 | 9 | __all__ = [ 10 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 11 | 'DeformConv', 'DeformRoIPooling', 'DeformRoIPoolingPack', 12 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 13 | 'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv', 14 | 'deform_roi_pooling' 15 | ] 16 | -------------------------------------------------------------------------------- /detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/ops/dcn/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .functions.deform_conv import deform_conv, modulated_deform_conv 2 | from .functions.deform_pool import deform_roi_pooling 3 | from .modules.deform_conv import (DeformConv, ModulatedDeformConv, 4 | ModulatedDeformConvPack) 5 | from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, 6 | ModulatedDeformRoIPoolingPack) 7 | 8 | __all__ = [ 9 | 'DeformConv', 'DeformRoIPooling', 'DeformRoIPoolingPack', 10 | 'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv', 11 | 'ModulatedDeformConvPack', 'deform_conv', 12 | 'modulated_deform_conv', 'deform_roi_pooling' 13 | ] 14 | -------------------------------------------------------------------------------- /detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/ops/dcn/deform_conv_cuda.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/deform_conv_cuda.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /detection/mmdet/ops/dcn/deform_pool_cuda.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/deform_pool_cuda.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- 
/detection/mmdet/ops/dcn/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/functions/__init__.py
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/functions/__pycache__/deform_conv.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/functions/__pycache__/deform_conv.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/functions/__pycache__/deform_pool.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/functions/__pycache__/deform_pool.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/functions/deform_pool.py:
--------------------------------------------------------------------------------
import torch
from torch.autograd import Function

from .. import deform_pool_cuda


class DeformRoIPoolingFunction(Function):
    """Autograd wrapper around the compiled ``deform_pool_cuda`` extension
    (deformable position-sensitive RoI pooling). CUDA-only."""

    @staticmethod
    def forward(ctx,
                data,
                rois,
                offset,
                spatial_scale,
                out_size,
                out_channels,
                no_trans,
                group_size=1,
                part_size=None,
                sample_per_part=4,
                trans_std=.0):
        # Stash the pooling hyper-parameters on ctx so backward() can
        # replay them into the CUDA kernel.
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # part_size defaults to the output size when not given explicitly.
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std

        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            # Only the CUDA kernel is implemented.
            raise NotImplementedError

        n = rois.shape[0]
        # One (out_channels, out_size, out_size) map per RoI; the kernel
        # fills both the pooled output and the per-bin sample counts.
        output = data.new_empty(n, out_channels, out_size, out_size)
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        deform_pool_cuda.deform_psroi_pooling_cuda_forward(
            data, rois, offset, output, output_count, ctx.no_trans,
            ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
            ctx.part_size, ctx.sample_per_part, ctx.trans_std)

        # output_count is only needed by the backward kernel; keep it (and
        # the inputs) only when a gradient will actually be requested.
        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
            ctx.output_count = output_count

        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError

        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None  # RoI coordinates are treated as constants.
        grad_offset = torch.zeros_like(offset)

        deform_pool_cuda.deform_psroi_pooling_cuda_backward(
            grad_output, data, rois, offset, output_count, grad_input,
            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
            ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
            ctx.trans_std)
        # One gradient slot per forward() argument; the scalar
        # hyper-parameters are non-differentiable, hence the trailing Nones.
        return (grad_input, grad_rois, grad_offset, None, None, None, None,
                None, None, None, None)


deform_roi_pooling = DeformRoIPoolingFunction.apply
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/modules/__init__.py
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/modules/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/modules/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/modules/__pycache__/deform_conv.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/modules/__pycache__/deform_conv.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/modules/__pycache__/deform_pool.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/dcn/modules/__pycache__/deform_pool.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/dcn/setup.py:
--------------------------------------------------------------------------------
# Build script for the two DCN CUDA extensions; run with
# `python setup.py build_ext --inplace`.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='deform_conv',
    ext_modules=[
        CUDAExtension('deform_conv_cuda', [
            'src/deform_conv_cuda.cpp',
            'src/deform_conv_cuda_kernel.cu',
        ]),
        CUDAExtension('deform_pool_cuda', [
            'src/deform_pool_cuda.cpp', 'src/deform_pool_cuda_kernel.cu'
        ]),
    ],
    cmdclass={'build_ext': BuildExtension})
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/.gitignore:
--------------------------------------------------------------------------------
*.cpp
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/Makefile:
--------------------------------------------------------------------------------
# Builds the Cython/CUDA NMS extensions in place.
PYTHON=${PYTHON:-python}

all:
	echo "Compiling nms kernels..."
	$(PYTHON) setup.py build_ext --inplace

clean:
	rm -f *.so
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/__init__.py:
--------------------------------------------------------------------------------
# Public API of mmdet.ops.nms: plain NMS (CPU/GPU dispatch) and Soft-NMS.
from .nms_wrapper import nms, soft_nms

__all__ = ['nms', 'soft_nms']
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/nms/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/__pycache__/nms_wrapper.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/nms/__pycache__/nms_wrapper.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/cpu_nms.pyx:
--------------------------------------------------------------------------------
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

# cython: language_level=3, boundscheck=False

import numpy as np
cimport numpy as np

cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
    return a if a >= b else b

cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
    return a if a <= b else b

# FIX: `np.float thresh` used the deprecated Python-side alias `np.float`,
# which was removed in NumPy 1.24; declare a plain C float instead.
# FIX: argsort() returns np.intp, so the index buffers are typed np.intp_t
# (dtype=np.int was likewise a removed alias) — portable across platforms.
def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, float thresh):
    """Greedy non-maximum suppression on CPU.

    dets: (N, 5) float32 array of (x1, y1, x2, y2, score).
    thresh: IoU overlap threshold; boxes overlapping a kept box by >= thresh
        are suppressed.
    Returns the list of kept indices into `dets`, in descending score order.
    """
    cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
    cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
    cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
    cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
    cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]

    # +1 follows the original Fast R-CNN inclusive-pixel box convention.
    cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    cdef np.ndarray[np.intp_t, ndim=1] order = scores.argsort()[::-1]

    cdef int ndets = dets.shape[0]
    cdef np.ndarray[np.intp_t, ndim=1] suppressed = \
        np.zeros((ndets), dtype=np.intp)

    # nominal indices
    cdef int _i, _j
    # sorted indices
    cdef int i, j
    # temp variables for box i's (the box currently under consideration)
    cdef np.float32_t ix1, iy1, ix2, iy2, iarea
    # variables for computing overlap with box j (lower scoring box)
    cdef np.float32_t xx1, yy1, xx2, yy2
    cdef np.float32_t w, h
    cdef np.float32_t inter, ovr

    keep = []
    for _i in range(ndets):
        i = order[_i]
        if suppressed[i] == 1:
            continue
        keep.append(i)
        ix1 = x1[i]
        iy1 = y1[i]
        ix2 = x2[i]
        iy2 = y2[i]
        iarea = areas[i]
        for _j in range(_i + 1, ndets):
            j = order[_j]
            if suppressed[j] == 1:
                continue
            xx1 = max(ix1, x1[j])
            yy1 = max(iy1, y1[j])
            xx2 = min(ix2, x2[j])
            yy2 = min(iy2, y2[j])
            w = max(0.0, xx2 - xx1 + 1)
            h = max(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (iarea + areas[j] - inter)
            if ovr >= thresh:
                suppressed[j] = 1

    return keep
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/cpu_soft_nms.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/nms/cpu_soft_nms.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/gpu_nms.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/nms/gpu_nms.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/gpu_nms.hpp:
--------------------------------------------------------------------------------
// CUDA NMS entry points implemented in nms_kernel.cu.
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id,
          size_t base);
size_t nms_Malloc();
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/gpu_nms.pyx:
--------------------------------------------------------------------------------
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

# cython: language_level=3, boundscheck=False

import numpy as np
cimport numpy as np

assert sizeof(int) == sizeof(np.int32_t)

cdef extern from "gpu_nms.hpp":
    void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int, size_t) nogil
    size_t nms_Malloc() nogil

# Per-device scratch buffers allocated lazily on first use and reused
# across calls to avoid repeated cudaMalloc.
memory_pool = {}

# FIX: `np.float thresh` (removed NumPy alias) -> plain C `float`;
# `order` is typed np.intp_t to match argsort()'s return dtype.
def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, float thresh,
            np.int32_t device_id=0):
    """Greedy NMS on GPU `device_id`; returns kept indices into `dets`."""
    cdef int boxes_num = dets.shape[0]
    cdef int boxes_dim = 5
    cdef int num_out
    cdef size_t base
    cdef np.ndarray[np.int32_t, ndim=1] \
        keep = np.zeros(boxes_num, dtype=np.int32)
    cdef np.ndarray[np.float32_t, ndim=1] \
        scores = dets[:, 4]
    cdef np.ndarray[np.intp_t, ndim=1] \
        order = scores.argsort()[::-1]
    cdef np.ndarray[np.float32_t, ndim=2] \
        sorted_dets = dets[order, :5]
    cdef float cthresh = thresh
    if device_id not in memory_pool:
        with nogil:
            base = nms_Malloc()
        memory_pool[device_id] = base
        # print "malloc", base
    base = memory_pool[device_id]
    with nogil:
        _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, cthresh, device_id, base)
    # The kernel returns indices into the score-sorted array; map them back
    # to original indices via `order`.
    keep = keep[:num_out]
    return list(order[keep])
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/nms_wrapper.py:
--------------------------------------------------------------------------------
import numpy as np
import torch

from .gpu_nms import gpu_nms
from .cpu_nms import cpu_nms
from .cpu_soft_nms import cpu_soft_nms


def nms(dets, iou_thr, device_id=None):
    """Dispatch to either CPU or GPU NMS implementations.

    dets: (N, 5) Tensor or ndarray of (x1, y1, x2, y2, score).
    iou_thr: IoU threshold for suppression.
    device_id: GPU to run on; inferred from a CUDA input tensor, otherwise
        falls back to the CPU implementation when None.
    Returns (kept_dets, kept_inds) matching the input container type.
    """
    if isinstance(dets, torch.Tensor):
        is_tensor = True
        if dets.is_cuda:
            device_id = dets.get_device()
        dets_np = dets.detach().cpu().numpy()
    elif isinstance(dets, np.ndarray):
        is_tensor = False
        dets_np = dets
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))

    if dets_np.shape[0] == 0:
        inds = []
    else:
        inds = (gpu_nms(dets_np, iou_thr, device_id=device_id)
                if device_id is not None else cpu_nms(dets_np, iou_thr))

    if is_tensor:
        inds = dets.new_tensor(inds, dtype=torch.long)
    else:
        inds = np.array(inds, dtype=np.int64)
    return dets[inds, :], inds


def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
    """Soft-NMS (CPU only): decay overlapping scores instead of dropping.

    method: 'linear' or 'gaussian' decay; sigma is the gaussian width and
    min_score the score below which boxes are discarded.
    Returns (new_dets, inds) matching the input container type.
    """
    if isinstance(dets, torch.Tensor):
        is_tensor = True
        dets_np = dets.detach().cpu().numpy()
    elif isinstance(dets, np.ndarray):
        is_tensor = False
        dets_np = dets
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))

    method_codes = {'linear': 1, 'gaussian': 2}
    if method not in method_codes:
        raise ValueError('Invalid method for SoftNMS: {}'.format(method))
    new_dets, inds = cpu_soft_nms(
        dets_np,
        iou_thr,
        method=method_codes[method],
        sigma=sigma,
        min_score=min_score)

    if is_tensor:
        return dets.new_tensor(new_dets), dets.new_tensor(
            inds, dtype=torch.long)
    else:
        return new_dets.astype(np.float32), inds.astype(np.int64)
--------------------------------------------------------------------------------
/detection/mmdet/ops/nms/setup.py:
-------------------------------------------------------------------------------- 1 | import os.path as osp 2 | from distutils.core import setup, Extension 3 | 4 | import numpy as np 5 | from Cython.Build import cythonize 6 | from Cython.Distutils import build_ext 7 | 8 | # extensions 9 | ext_args = dict( 10 | include_dirs=[np.get_include()], 11 | language='c++', 12 | extra_compile_args={ 13 | 'cc': ['-Wno-unused-function', '-Wno-write-strings'], 14 | 'nvcc': ['-c', '--compiler-options', '-fPIC'], 15 | }, 16 | ) 17 | 18 | extensions = [ 19 | Extension('cpu_nms', ['cpu_nms.pyx'], **ext_args), 20 | Extension('cpu_soft_nms', ['cpu_soft_nms.pyx'], **ext_args), 21 | Extension('gpu_nms', ['gpu_nms.pyx', 'nms_kernel.cu'], **ext_args), 22 | ] 23 | 24 | 25 | def customize_compiler_for_nvcc(self): 26 | """inject deep into distutils to customize how the dispatch 27 | to cc/nvcc works. 28 | If you subclass UnixCCompiler, it's not trivial to get your subclass 29 | injected in, and still have the right customizations (i.e. 30 | distutils.sysconfig.customize_compiler) run on it. So instead of going 31 | the OO route, I have this. Note, it's kindof like a wierd functional 32 | subclassing going on.""" 33 | 34 | # tell the compiler it can processes .cu 35 | self.src_extensions.append('.cu') 36 | 37 | # save references to the default compiler_so and _comple methods 38 | default_compiler_so = self.compiler_so 39 | super = self._compile 40 | 41 | # now redefine the _compile method. This gets executed for each 42 | # object but distutils doesn't have the ability to change compilers 43 | # based on source extension: we add it. 
44 | def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts): 45 | if osp.splitext(src)[1] == '.cu': 46 | # use the cuda for .cu files 47 | self.set_executable('compiler_so', 'nvcc') 48 | # use only a subset of the extra_postargs, which are 1-1 translated 49 | # from the extra_compile_args in the Extension class 50 | postargs = extra_postargs['nvcc'] 51 | else: 52 | postargs = extra_postargs['cc'] 53 | 54 | super(obj, src, ext, cc_args, postargs, pp_opts) 55 | # reset the default compiler_so, which we might have changed for cuda 56 | self.compiler_so = default_compiler_so 57 | 58 | # inject our redefined _compile method into the class 59 | self._compile = _compile 60 | 61 | 62 | # run the customize_compiler 63 | class custom_build_ext(build_ext): 64 | 65 | def build_extensions(self): 66 | customize_compiler_for_nvcc(self.compiler) 67 | build_ext.build_extensions(self) 68 | 69 | 70 | setup( 71 | name='nms', 72 | cmdclass={'build_ext': custom_build_ext}, 73 | ext_modules=cythonize(extensions), 74 | ) 75 | -------------------------------------------------------------------------------- /detection/mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions.roi_align import roi_align 2 | from .modules.roi_align import RoIAlign 3 | 4 | __all__ = ['roi_align', 'RoIAlign'] 5 | -------------------------------------------------------------------------------- /detection/mmdet/ops/roi_align/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detection/mmdet/ops/roi_align/functions/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/functions/__init__.py
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/functions/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/functions/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/functions/__pycache__/roi_align.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/functions/__pycache__/roi_align.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/functions/roi_align.py:
--------------------------------------------------------------------------------
from torch.autograd import Function

from .. import roi_align_cuda


class RoIAlignFunction(Function):
    """Autograd wrapper around the compiled ``roi_align_cuda`` extension.
    CUDA-only; rois are expected as (batch_index, x1, y1, x2, y2) rows."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
        # out_size is either a single int (square output) or an (h, w) tuple.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif isinstance(out_size, tuple):
            assert len(out_size) == 2
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            out_h, out_w = out_size
        else:
            raise TypeError(
                '"out_size" must be an integer or tuple of integers')
        # Remember everything backward() needs to size the input gradient.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()

        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size(0)

        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        if features.is_cuda:
            roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale,
                                   sample_num, output)
        else:
            # CPU path not implemented.
            raise NotImplementedError

        return output

    @staticmethod
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None and grad_output.is_cuda)

        batch_size, num_channels, data_height, data_width = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)

        # Only the feature map receives a gradient; rois are constants.
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height,
                                        data_width)
            roi_align_cuda.backward(grad_output.contiguous(), rois, out_h,
                                    out_w, spatial_scale, sample_num,
                                    grad_input)

        # One slot per forward() argument (features, rois, out_size,
        # spatial_scale, sample_num).
        return grad_input, grad_rois, None, None, None


roi_align = RoIAlignFunction.apply
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/gradcheck.py:
--------------------------------------------------------------------------------
# Numerical gradient check for the RoIAlign CUDA op (requires a GPU).
import numpy as np
import torch
from torch.autograd import gradcheck

import os.path as osp
import sys
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_align import RoIAlign  # noqa: E402

feat_size = 15
spatial_scale = 1.0 / 8
img_size = feat_size / spatial_scale
num_imgs = 2
num_rois = 20

# Random RoIs in (batch_index, x1, y1, x2, y2) form; the bottom-right
# corner is shifted by half the image so x2 >= x1 and y2 >= y1.
batch_ind = np.random.randint(num_imgs, size=(num_rois, 1))
rois = np.random.rand(num_rois, 4) * img_size * 0.5
rois[:, 2:] += img_size * 0.5
rois = np.hstack((batch_ind, rois))

feat = torch.randn(
    num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0')
rois = torch.from_numpy(rois).float().cuda()
inputs = (feat, rois)
print('Gradcheck for roi align...')
# Check both the default sampling ratio and sample_num=2.
test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3)
print(test)
test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3)
print(test)
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/modules/__init__.py
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/modules/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/modules/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/modules/__pycache__/roi_align.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/modules/__pycache__/roi_align.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/modules/roi_align.py:
--------------------------------------------------------------------------------
from torch.nn.modules.module import Module
from ..functions.roi_align import RoIAlignFunction


class RoIAlign(Module):
    """nn.Module wrapper around RoIAlignFunction.

    out_size: output spatial size (int or (h, w) tuple).
    spatial_scale: feature-map scale relative to the input image.
    sample_num: sampling points per bin (0 = adaptive, per the CUDA kernel).
    """

    def __init__(self, out_size, spatial_scale, sample_num=0):
        super(RoIAlign, self).__init__()

        self.out_size = out_size
        self.spatial_scale = float(spatial_scale)
        self.sample_num = int(sample_num)

    def forward(self, features, rois):
        # rois: (N, 5) rows of (batch_index, x1, y1, x2, y2).
        return RoIAlignFunction.apply(features, rois, self.out_size,
                                      self.spatial_scale, self.sample_num)
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/roi_align_cuda.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_align/roi_align_cuda.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/setup.py:
--------------------------------------------------------------------------------
# Build script for the RoIAlign CUDA extension.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='roi_align_cuda',
    ext_modules=[
        CUDAExtension('roi_align_cuda', [
            'src/roi_align_cuda.cpp',
            'src/roi_align_kernel.cu',
        ]),
    ],
    cmdclass={'build_ext': BuildExtension})
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp:
--------------------------------------------------------------------------------
// C++/pybind11 bindings for the RoIAlign CUDA kernels.
// NOTE(review): the #include arguments were stripped by this dump's
// extraction (angle-bracketed headers lost); the originals presumably
// include <torch/extension.h> and the C++ headers — confirm upstream.
#include

#include
#include

// Kernel launchers implemented in roi_align_kernel.cu.
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output);

int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad);

// Input validation: tensors must be CUDA and contiguous.
#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
#define CHECK_CONTIGUOUS(x) \
  AT_CHECK(x.is_contiguous(), #x, " must be contiguous ")
#define CHECK_INPUT(x) \
  CHECK_CUDA(x);       \
  CHECK_CONTIGUOUS(x)

int roi_align_forward_cuda(at::Tensor features, at::Tensor rois,
                           int pooled_height, int pooled_width,
                           float spatial_scale, int sample_num,
                           at::Tensor output) {
  CHECK_INPUT(features);
  CHECK_INPUT(rois);
  CHECK_INPUT(output);

  // Number of ROIs
  int num_rois = rois.size(0);
  int size_rois = rois.size(1);

  // rois must be (N, 5): batch index + 4 box coordinates.
  if (size_rois != 5) {
    printf("wrong roi size\n");
    return 0;
  }

  int num_channels = features.size(1);
  int data_height = features.size(2);
  int data_width = features.size(3);

  ROIAlignForwardLaucher(features, rois, spatial_scale, sample_num,
                         num_channels, data_height, data_width, num_rois,
                         pooled_height, pooled_width, output);

  return 1;
}

int roi_align_backward_cuda(at::Tensor top_grad, at::Tensor rois,
                            int pooled_height, int pooled_width,
                            float spatial_scale, int sample_num,
                            at::Tensor bottom_grad) {
  CHECK_INPUT(top_grad);
  CHECK_INPUT(rois);
  CHECK_INPUT(bottom_grad);

  // Number of ROIs
  int num_rois = rois.size(0);
  int size_rois = rois.size(1);
  if (size_rois != 5) {
    printf("wrong roi size\n");
    return 0;
  }

  int num_channels = bottom_grad.size(1);
  int data_height = bottom_grad.size(2);
  int data_width = bottom_grad.size(3);

  ROIAlignBackwardLaucher(top_grad, rois, spatial_scale, sample_num,
                          num_channels, data_height, data_width, num_rois,
                          pooled_height, pooled_width, bottom_grad);

  return 1;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &roi_align_forward_cuda, "Roi_Align forward (CUDA)");
  m.def("backward", &roi_align_backward_cuda, "Roi_Align backward (CUDA)");
}
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/__init__.py:
--------------------------------------------------------------------------------
# Public API of mmdet.ops.roi_pool: functional form and nn.Module form.
from .functions.roi_pool import roi_pool
from .modules.roi_pool import RoIPool

__all__ = ['roi_pool', 'RoIPool']
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_pool/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_pool/functions/__init__.py
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/functions/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_pool/functions/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/functions/__pycache__/roi_pool.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/mmdet/ops/roi_pool/functions/__pycache__/roi_pool.cpython-36.pyc
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/functions/roi_pool.py:
--------------------------------------------------------------------------------
import torch
from torch.autograd import Function

from .. import roi_pool_cuda


class RoIPoolFunction(Function):
    """Autograd wrapper around the compiled ``roi_pool_cuda`` extension
    (max RoI pooling). CUDA-only."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        # out_size is either a single int (square output) or an (h, w) tuple.
        if isinstance(out_size, int):
            out_h = out_size
            out_w = out_size
        elif isinstance(out_size, tuple):
            assert len(out_size) == 2
            assert isinstance(out_size[0], int)
            assert isinstance(out_size[1], int)
            out_h, out_w = out_size
        else:
            raise TypeError(
                '"out_size" must be an integer or tuple of integers')
        assert features.is_cuda
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        out_size = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(out_size)
        # argmax records, per output cell, which input location produced the
        # max; the backward kernel routes gradients through it.
        argmax = features.new_zeros(out_size, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale,
                              output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        # Stored directly on ctx (not via save_for_backward) since argmax is
        # an intermediate, not an input.
        ctx.argmax = argmax

        return output

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        spatial_scale = ctx.spatial_scale
        feature_size = ctx.feature_size
        argmax = ctx.argmax
        rois = ctx.saved_tensors[0]
        assert feature_size is not None

        # Only the feature map receives a gradient; rois are constants.
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
                                   spatial_scale, grad_input)

        # One slot per forward() argument (features, rois, out_size,
        # spatial_scale).
        return grad_input, grad_rois, None, None


roi_pool = RoIPoolFunction.apply
--------------------------------------------------------------------------------
/detection/mmdet/ops/roi_pool/gradcheck.py:
--------------------------------------------------------------------------------
# Numerical gradient check for the RoIPool CUDA op (requires a GPU).
import torch
from torch.autograd import gradcheck

import os.path as osp
import sys
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_pool
class RoIPool(Module):
    """nn.Module wrapper around the functional ``roi_pool`` op.

    Holds the pooling configuration (output size and spatial scale) so the
    op can be placed in a network like any other layer.
    """

    def __init__(self, out_size, spatial_scale):
        super(RoIPool, self).__init__()

        self.out_size = out_size
        # the underlying kernel expects a float scale, so coerce up front
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        """Delegate to the functional op with the stored configuration."""
        return roi_pool(features, rois, self.out_size, self.spatial_scale)
CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 20 | #define CHECK_CONTIGUOUS(x) \ 21 | AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 22 | #define CHECK_INPUT(x) \ 23 | CHECK_CUDA(x); \ 24 | CHECK_CONTIGUOUS(x) 25 | 26 | int roi_pooling_forward_cuda(at::Tensor features, at::Tensor rois, 27 | int pooled_height, int pooled_width, 28 | float spatial_scale, at::Tensor output, 29 | at::Tensor argmax) { 30 | CHECK_INPUT(features); 31 | CHECK_INPUT(rois); 32 | CHECK_INPUT(output); 33 | CHECK_INPUT(argmax); 34 | 35 | // Number of ROIs 36 | int num_rois = rois.size(0); 37 | int size_rois = rois.size(1); 38 | 39 | if (size_rois != 5) { 40 | printf("wrong roi size\n"); 41 | return 0; 42 | } 43 | 44 | int channels = features.size(1); 45 | int height = features.size(2); 46 | int width = features.size(3); 47 | 48 | ROIPoolForwardLaucher(features, rois, spatial_scale, channels, height, width, 49 | num_rois, pooled_height, pooled_width, output, argmax); 50 | 51 | return 1; 52 | } 53 | 54 | int roi_pooling_backward_cuda(at::Tensor top_grad, at::Tensor rois, 55 | at::Tensor argmax, float spatial_scale, 56 | at::Tensor bottom_grad) { 57 | CHECK_INPUT(top_grad); 58 | CHECK_INPUT(rois); 59 | CHECK_INPUT(argmax); 60 | CHECK_INPUT(bottom_grad); 61 | 62 | int pooled_height = top_grad.size(2); 63 | int pooled_width = top_grad.size(3); 64 | int num_rois = rois.size(0); 65 | int size_rois = rois.size(1); 66 | 67 | if (size_rois != 5) { 68 | printf("wrong roi size\n"); 69 | return 0; 70 | } 71 | int batch_size = bottom_grad.size(0); 72 | int channels = bottom_grad.size(1); 73 | int height = bottom_grad.size(2); 74 | int width = bottom_grad.size(3); 75 | 76 | ROIPoolBackwardLaucher(top_grad, rois, argmax, spatial_scale, batch_size, 77 | channels, height, width, num_rois, pooled_height, 78 | pooled_width, bottom_grad); 79 | 80 | return 1; 81 | } 82 | 83 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 84 | m.def("forward", &roi_pooling_forward_cuda, "Roi_Pooling 
forward (CUDA)"); 85 | m.def("backward", &roi_pooling_backward_cuda, "Roi_Pooling backward (CUDA)"); 86 | } 87 | -------------------------------------------------------------------------------- /detection/mmdet/version.py: -------------------------------------------------------------------------------- 1 | # GENERATED VERSION FILE 2 | # TIME: Wed Jun 5 06:01:46 2019 3 | 4 | __version__ = '0.6.rc0+edb0393' 5 | short_version = '0.6.rc0' 6 | -------------------------------------------------------------------------------- /detection/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/tools/__init__.py -------------------------------------------------------------------------------- /detection/tools/__pycache__/flops_counter.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/implus/PytorchInsight/2864528f8b83f52c3df76f7c3804aa468b91e5cf/detection/tools/__pycache__/flops_counter.cpython-36.pyc -------------------------------------------------------------------------------- /detection/tools/coco_eval.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | from mmdet.core import coco_eval 4 | 5 | 6 | def main(): 7 | parser = ArgumentParser(description='COCO Evaluation') 8 | parser.add_argument('result', help='result file path') 9 | parser.add_argument('--ann', help='annotation file path') 10 | parser.add_argument( 11 | '--types', 12 | type=str, 13 | nargs='+', 14 | choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'], 15 | default=['bbox'], 16 | help='result types') 17 | parser.add_argument( 18 | '--max-dets', 19 | type=int, 20 | nargs='+', 21 | default=[100, 300, 1000], 22 | help='proposal numbers, only used for recall evaluation') 23 | args = 
parser.parse_args() 24 | coco_eval(args.result, args.types, args.ann, args.max_dets) 25 | 26 | 27 | if __name__ == '__main__': 28 | main() 29 | -------------------------------------------------------------------------------- /detection/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | $PYTHON -m torch.distributed.launch --nproc_per_node=$2 $(dirname "$0")/train.py $1 --launcher pytorch ${@:3} 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /detection/tools/train.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import argparse 4 | from mmcv import Config 5 | 6 | from mmdet import __version__ 7 | from mmdet.datasets import get_dataset 8 | from mmdet.apis import (train_detector, init_dist, get_root_logger, 9 | set_random_seed) 10 | from mmdet.models import build_detector 11 | import torch 12 | 13 | 14 | def parse_args(): 15 | parser = argparse.ArgumentParser(description='Train a detector') 16 | parser.add_argument('config', help='train config file path') 17 | parser.add_argument('--work_dir', help='the dir to save logs and models') 18 | parser.add_argument( 19 | '--resume_from', help='the checkpoint file to resume from') 20 | parser.add_argument( 21 | '--validate', 22 | action='store_true', 23 | #default=True, 24 | help='whether to evaluate the checkpoint during training') 25 | parser.add_argument( 26 | '--gpus', 27 | type=int, 28 | default=1, 29 | help='number of gpus to use ' 30 | '(only applicable to non-distributed training)') 31 | parser.add_argument('--seed', type=int, default=None, help='random seed') 32 | parser.add_argument( 33 | '--launcher', 34 | choices=['none', 'pytorch', 'slurm', 'mpi'], 35 | default='none', 36 | help='job launcher') 37 | parser.add_argument('--local_rank', type=int, default=0) 38 | args = 
parser.parse_args() 39 | 40 | return args 41 | 42 | 43 | def main(): 44 | args = parse_args() 45 | 46 | cfg = Config.fromfile(args.config) 47 | # set cudnn_benchmark 48 | if cfg.get('cudnn_benchmark', False): 49 | torch.backends.cudnn.benchmark = True 50 | # update configs according to CLI args 51 | if args.work_dir is not None: 52 | cfg.work_dir = args.work_dir 53 | if args.resume_from is not None: 54 | cfg.resume_from = args.resume_from 55 | 56 | #cfg.resume_from = 'work_dirs/retinanet_r101_fpn_2x_pretrain_cbam_resnet101/epoch_9.pth' 57 | #print('resume from', cfg.resume_from) 58 | 59 | cfg.gpus = args.gpus 60 | if cfg.checkpoint_config is not None: 61 | # save mmdet version in checkpoints as meta data 62 | cfg.checkpoint_config.meta = dict( 63 | mmdet_version=__version__, config=cfg.text) 64 | 65 | # init distributed env first, since logger depends on the dist info. 66 | if args.launcher == 'none': 67 | distributed = False 68 | else: 69 | distributed = True 70 | init_dist(args.launcher, **cfg.dist_params) 71 | 72 | # init logger before other steps 73 | logger = get_root_logger(cfg.log_level) 74 | logger.info('Distributed training: {}'.format(distributed)) 75 | 76 | # set random seeds 77 | if args.seed is not None: 78 | logger.info('Set random seed to {}'.format(args.seed)) 79 | set_random_seed(args.seed) 80 | 81 | model = build_detector( 82 | cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) 83 | 84 | train_dataset = get_dataset(cfg.data.train) 85 | train_detector(model, train_dataset, cfg, distributed=distributed, validate=args.validate, logger=logger) 86 | 87 | 88 | if __name__ == '__main__': 89 | main() 90 | -------------------------------------------------------------------------------- /detection/tools/voc_eval.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | import mmcv 4 | import numpy as np 5 | 6 | from mmdet import datasets 7 | from mmdet.core import eval_map 8 
def voc_eval(result_file, dataset, iou_thr=0.5):
    """Evaluate detection results on a PASCAL VOC style dataset.

    Args:
        result_file (str): Path to the dumped detection results
            (loadable by ``mmcv.load``).
        dataset: Dataset providing ``get_ann_info(i)`` per image; annotations
            carry 'bboxes'/'labels' and optionally
            'bboxes_ignore'/'labels_ignore'.
        iou_thr (float): IoU threshold for a detection to count as a match.
    """
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            # flag the appended ignore boxes so eval_map can exclude them
            # NOTE: np.bool (removed in NumPy >= 1.24) replaced by builtin
            # bool — identical dtype, forward-compatible.
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        # BUG FIX: original read `gt_ignore = gt_ignore` (a no-op), so an
        # empty list was passed through; eval_map expects None when no image
        # has ignore annotations.
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True)


def main():
    """CLI entry point: parse arguments, build the test dataset, evaluate."""
    parser = ArgumentParser(description='VOC Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for evaluation')
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
    voc_eval(args.result, test_dataset, args.iou_thr)


if __name__ == '__main__':
    main()