├── .github ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── error-report.md │ ├── feature_request.md │ ├── general_questions.md │ └── reimplementation_questions.md └── workflows │ └── build.yml ├── .gitignore ├── .gitmodules ├── .isort.cfg ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── .style.yapf ├── LICENSE ├── README.md ├── configs ├── _base_ │ ├── datasets │ │ ├── cityscapes_detection.py │ │ ├── cityscapes_instance.py │ │ ├── coco_detection.py │ │ ├── coco_instance.py │ │ ├── coco_instance_semantic.py │ │ ├── deepfashion.py │ │ ├── lvis_instance.py │ │ ├── voc0712.py │ │ └── wider_face.py │ ├── default_runtime.py │ ├── models │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ ├── cascade_rcnn_r50_fpn.py │ │ ├── fast_rcnn_r50_fpn.py │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ ├── faster_rcnn_r50_fpn.py │ │ ├── mask_rcnn_r50_caffe_c4.py │ │ ├── mask_rcnn_r50_fpn.py │ │ ├── retinanet_r50_fpn.py │ │ ├── rpn_r50_caffe_c4.py │ │ ├── rpn_r50_fpn.py │ │ └── ssd300.py │ └── schedules │ │ ├── schedule_1x.py │ │ ├── schedule_20e.py │ │ └── schedule_2x.py ├── albu_example │ ├── README.md │ └── mask_rcnn_r50_fpn_albu_1x_coco.py ├── atss │ ├── README.md │ └── atss_r50_fpn_1x_coco.py ├── carafe │ ├── README.md │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py │ └── mask_rcnn_r50_fpn_carafe_1x_coco.py ├── cascade_rcnn │ ├── README.md │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py │ ├── cascade_rcnn_r101_fpn_1x_coco.py │ ├── cascade_rcnn_r101_fpn_20e_coco.py │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py │ ├── cascade_rcnn_r50_fpn_1x_coco.py │ ├── cascade_rcnn_r50_fpn_20e_coco.py │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py │ └── cascade_rcnn_x101_64x4d_fpn_20e_coco.py ├── cityscapes │ ├── README.md │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ └── mask_rcnn_r50_fpn_1x_cityscapes.py ├── dcn │ ├── README.md │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ └── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ├── deepfashion │ ├── README.md │ └── mask_rcnn_r50_fpn_15e_deepfashion.py ├── detectors │ ├── README.md │ ├── cascade_rcnn_r50_rfp_1x_coco.py │ ├── cascade_rcnn_r50_sac_1x_coco.py │ ├── detectors_cascade_rcnn_r50_1x_coco.py │ ├── detectors_htc_r50_1x_coco.py │ ├── htc_r50_rfp_1x_coco.py │ └── htc_r50_sac_1x_coco.py ├── double_heads │ ├── README.md │ └── 
dh_faster_rcnn_r50_fpn_1x_coco.py ├── dynamic_rcnn │ ├── README.md │ └── dynamic_rcnn_r50_fpn_1x.py ├── empirical_attention │ ├── README.md │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py │ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ├── fast_rcnn │ ├── README.md │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py │ ├── fast_rcnn_r101_fpn_1x_coco.py │ ├── fast_rcnn_r101_fpn_2x_coco.py │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py │ ├── fast_rcnn_r50_fpn_1x_coco.py │ └── fast_rcnn_r50_fpn_2x_coco.py ├── faster_rcnn │ ├── README.md │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py │ ├── faster_rcnn_r101_fpn_1x_coco.py │ ├── faster_rcnn_r101_fpn_2x_coco.py │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ ├── faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py │ ├── faster_rcnn_r50_fpn_1x_coco-person.py │ ├── faster_rcnn_r50_fpn_1x_coco.py │ ├── faster_rcnn_r50_fpn_2x_coco.py │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py │ ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py │ └── faster_rcnn_x101_64x4d_fpn_2x_coco.py ├── fcos │ ├── README.md │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_4x4_1x_coco.py │ ├── fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── fcos_r50_caffe_fpn_4x4_1x_coco.py │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ └── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_4x2_2x_coco.py ├── foveabox │ ├── README.md │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── fovea_r101_fpn_4x4_1x_coco.py │ ├── fovea_r101_fpn_4x4_2x_coco.py │ ├── fovea_r50_fpn_4x4_1x_coco.py │ └── fovea_r50_fpn_4x4_2x_coco.py ├── fp16 │ ├── README.md │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py │ └── retinanet_r50_fpn_fp16_1x_coco.py ├── free_anchor │ ├── README.md │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py ├── fsaf │ ├── README.md │ ├── fsaf_r101_fpn_1x_coco.py │ ├── fsaf_r50_fpn_1x_coco.py │ └── fsaf_x101_64x4d_fpn_1x_coco.py ├── gcnet │ ├── README.md │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ ├── 
cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ └── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py ├── gfl │ ├── README.md │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ ├── gfl_r101_fpn_mstrain_2x_coco.py │ ├── gfl_r50_fpn_1x_coco.py │ ├── gfl_r50_fpn_mstrain_2x_coco.py │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ └── gfl_x101_32x4d_fpn_mstrain_2x_coco.py ├── ghm │ ├── README.md │ ├── retinanet_ghm_r101_fpn_1x_coco.py │ ├── retinanet_ghm_r50_fpn_1x_coco.py │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py │ └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py ├── gn+ws │ ├── README.md │ ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ └── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py ├── gn │ ├── README.md │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py │ └── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py ├── grid_rcnn │ ├── README.md │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py │ └── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py ├── groie │ ├── README.md │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py │ └── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py ├── guided_anchoring │ ├── README.md │ ├── ga_fast_r50_caffe_fpn_1x_coco.py │ ├── ga_faster_r101_caffe_fpn_1x_coco.py │ ├── ga_faster_r50_caffe_fpn_1x_coco.py │ ├── ga_faster_r50_fpn_1x_coco.py │ ├── ga_faster_x101_32x4d_fpn_1x_coco.py │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py │ ├── ga_retinanet_r101_caffe_fpn_1x_coco.py │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py │ ├── ga_retinanet_r50_fpn_1x_coco.py │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py │ ├── ga_rpn_r101_caffe_fpn_1x_coco.py │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py │ ├── ga_rpn_r50_fpn_1x_coco.py │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py │ └── ga_rpn_x101_64x4d_fpn_1x_coco.py ├── hrnet │ ├── 
README.md │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py │ ├── htc_hrnetv2p_w18_20e_coco.py │ ├── htc_hrnetv2p_w32_20e_coco.py │ ├── htc_hrnetv2p_w40_20e_coco.py │ ├── htc_hrnetv2p_w40_28e_coco.py │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py │ ├── mask_rcnn_hrnetv2p_w18_1x_coco.py │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py │ └── mask_rcnn_hrnetv2p_w40_2x_coco.py ├── htc │ ├── README.md │ ├── htc_r101_fpn_20e_coco.py │ ├── htc_r50_fpn_1x_coco.py │ ├── htc_r50_fpn_20e_coco.py │ ├── htc_without_semantic_r50_fpn_1x_coco.py │ ├── htc_x101_32x4d_fpn_16x1_20e_coco.py │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py │ └── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py ├── instaboost │ ├── README.md │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py │ └── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py ├── legacy_1.x │ ├── README.md │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py │ ├── retinanet_r50_fpn_1x_coco_v1.py │ └── ssd300_coco_v1.py ├── libra_rcnn │ ├── README.md │ ├── libra_fast_rcnn_r50_fpn_1x_coco.py │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py │ └── libra_retinanet_r50_fpn_1x_coco.py ├── lvis │ ├── README.md │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis.py │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis.py │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis.py ├── mask_rcnn │ ├── README.md │ ├── mask_rcnn_r101_caffe_fpn_1x_coco.py │ ├── mask_rcnn_r101_fpn_1x_coco.py │ ├── mask_rcnn_r101_fpn_2x_coco.py │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py │ ├── mask_rcnn_r50_fpn_1x_coco.py │ ├── mask_rcnn_r50_fpn_2x_coco.py │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py │ ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py │ ├── 
mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py │ └── mask_rcnn_x101_64x4d_fpn_2x_coco.py ├── ms_rcnn │ ├── README.md │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py │ ├── ms_rcnn_r50_fpn_1x_coco.py │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py │ └── ms_rcnn_x101_64x4d_fpn_2x_coco.py ├── nas_fcos │ ├── README.md │ ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py ├── nas_fpn │ ├── README.md │ ├── retinanet_r50_fpn_crop640_50e_coco.py │ └── retinanet_r50_nasfpn_crop640_50e_coco.py ├── obb │ ├── _base_ │ │ ├── datasets │ │ │ ├── dior.py │ │ │ ├── dota.py │ │ │ ├── hrsc.py │ │ │ ├── isaid.py │ │ │ ├── msra_td500.py │ │ │ └── rctw17.py │ │ └── schedules │ │ │ ├── schedule_1x.py │ │ │ ├── schedule_2x.py │ │ │ └── schedule_3x.py │ ├── arc │ │ ├── arc_orcnn_r101fpn1x_ss_dota10_RxFFF_n4.py │ │ ├── arc_orcnn_r50fpn1x_ss_dota10_RxFFF_n4.py │ │ ├── orcnn_r101fpn1x_ss_dota10.py │ │ └── orcnn_r50fpn1x_ss_dota10.py │ ├── atss_obb │ │ └── README.md │ ├── double_heads_obb │ │ ├── README.md │ │ └── dh_faster_rcnn_obb_r50_fpn_1x_dota10.py │ ├── faster_rcnn_obb │ │ ├── README.md │ │ ├── faster_rcnn_obb_r101_fpn_1x_dota10.py │ │ ├── faster_rcnn_obb_r101_fpn_3x_hrsc.py │ │ ├── faster_rcnn_obb_r50_fpn_1x_dota10.py │ │ └── faster_rcnn_obb_r50_fpn_3x_hrsc.py │ ├── fcos_obb │ │ ├── README.md │ │ └── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_dota10.py │ ├── gliding_vertex │ │ ├── README.md │ │ ├── gliding_vertex_r101_fpn_1x_dota10.py │ │ ├── gliding_vertex_r101_fpn_3x_hrsc.py │ │ ├── gliding_vertex_r50_fpn_1x_dota10.py │ │ └── gliding_vertex_r50_fpn_3x_hrsc.py │ ├── mask_rcnn │ │ ├── README.md │ │ └── mask_rcnn_r50_fpn_1x_isaid.py │ ├── oriented_rcnn │ │ ├── README.md │ │ ├── faster_rcnn_orpn_r101_fpn_1x_dota10.py │ │ ├── faster_rcnn_orpn_r101_fpn_1x_ms_rr_dota10.py │ │ ├── faster_rcnn_orpn_r101_fpn_3x_hrsc.py │ │ ├── faster_rcnn_orpn_r50_fpn_1x_dota10.py │ │ ├── faster_rcnn_orpn_r50_fpn_1x_ms_rr_dota10.py │ │ ├── faster_rcnn_orpn_r50_fpn_3x_hrsc.py │ │ ├── illustration.jpg │ │ ├── orpn_r101_fpn_1x_dota10.py │ │ └── orpn_r50_fpn_1x_dota10.py │ ├── oriented_rcnn_beyond │ │ ├── README.md │ │ ├── illustration.jpg │ │ ├── mask_rcnn_orpn_r101_fpn_1x_isaid.py │ │ ├── mask_rcnn_orpn_r152_fpn_1x_isaid.py │ │ └── mask_rcnn_orpn_r50_fpn_1x_isaid.py │ ├── poly_iou_loss │ │ ├── README.md │ │ ├── retinanet_obb_r50_fpn_giouloss_1x_dota.py │ │ └── retinanet_obb_r50_fpn_iouloss_1x_dota.py │ ├── random_fp │ │ ├── README.md │ │ ├── datasets │ │ │ ├── 01fp_dota10.py │ │ │ ├── 03fp_dota10.py │ │ │ └── 05fp_dota10.py │ │ ├── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_01fp_dota10.py │ │ ├── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_03fp_dota10.py │ │ └── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_05fp_dota10.py │ ├── retinanet_obb │ │ ├── README.md │ │ ├── retinanet_obb_r101_fpn_1x_dota10.py │ │ ├── retinanet_obb_r101_fpn_2x_dota10.py │ │ ├── retinanet_obb_r101_fpn_3x_hrsc.py │ │ ├── retinanet_obb_r50_fpn_1x_dota10.py │ │ ├── retinanet_obb_r50_fpn_2x_dota10.py │ │ └── retinanet_obb_r50_fpn_3x_hrsc.py │ ├── roi_transformer │ │ ├── README.md │ │ ├── faster_rcnn_roitrans_r101_fpn_1x_dota10.py │ │ ├── faster_rcnn_roitrans_r101_fpn_3x_hrsc.py │ │ ├── faster_rcnn_roitrans_r50_fpn_1x_dota10.py │ │ └── faster_rcnn_roitrans_r50_fpn_3x_hrsc.py │ └── s2anet │ │ ├── README.md │ │ └── s2anet_r50_fpn_1x_dota10.py ├── 
pafpn │ ├── README.md │ └── faster_rcnn_r50_pafpn_1x_coco.py ├── pascal_voc │ ├── README.md │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ ├── retinanet_r50_fpn_1x_voc0712.py │ ├── ssd300_voc0712.py │ └── ssd512_voc0712.py ├── pisa │ ├── README.md │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ ├── pisa_retinanet_r50_fpn_1x_coco.py │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py │ ├── pisa_ssd300_coco.py │ └── pisa_ssd512_coco.py ├── point_rend │ ├── README.md │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py ├── regnet │ ├── README.md │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ ├── faster_rcnn_regnetx-3GF_fpn_mstrain_3x_coco.py │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py │ └── retinanet_regnetx-800MF_fpn_1x_coco.py ├── reppoints │ ├── README.md │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py │ ├── reppoints.png │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py │ ├── reppoints_moment_r50_fpn_1x_coco.py │ ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py ├── res2net │ ├── README.md │ ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py │ ├── faster_rcnn_r2_101_fpn_2x_coco.py │ ├── htc_r2_101_fpn_20e_coco.py │ └── mask_rcnn_r2_101_fpn_2x_coco.py ├── retinanet │ ├── README.md │ ├── retinanet_r101_caffe_fpn_1x_coco.py │ ├── retinanet_r101_fpn_1x_coco.py │ ├── retinanet_r101_fpn_2x_coco.py │ ├── retinanet_r50_caffe_fpn_1x_coco.py │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py │ ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py │ ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py │ ├── retinanet_r50_fpn_1x_coco.py │ ├── retinanet_r50_fpn_2x_coco.py │ ├── retinanet_x101_32x4d_fpn_1x_coco.py │ ├── retinanet_x101_32x4d_fpn_2x_coco.py │ ├── retinanet_x101_64x4d_fpn_1x_coco.py │ └── retinanet_x101_64x4d_fpn_2x_coco.py ├── rpn │ ├── README.md │ ├── rpn_r101_caffe_fpn_1x_coco.py │ ├── rpn_r101_fpn_1x_coco.py │ ├── rpn_r101_fpn_2x_coco.py │ ├── rpn_r50_caffe_c4_1x_coco.py │ ├── rpn_r50_caffe_fpn_1x_coco.py │ ├── rpn_r50_fpn_1x_coco.py │ ├── rpn_r50_fpn_2x_coco.py │ ├── rpn_x101_32x4d_fpn_1x_coco.py │ ├── rpn_x101_32x4d_fpn_2x_coco.py │ ├── rpn_x101_64x4d_fpn_1x_coco.py │ └── rpn_x101_64x4d_fpn_2x_coco.py ├── scratch │ ├── README.md │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ └── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py ├── ssd │ ├── README.md │ ├── ssd300_coco.py │ └── ssd512_coco.py └── wider_face │ ├── README.md │ └── ssd300_wider_face.py ├── demo ├── coco_test_12510.jpg ├── corruptions_sev_3.png ├── data_pipeline.png ├── demo.jpg ├── 
dota_demo.jpg ├── huge_image_demo.py ├── image_demo.py ├── inference_demo.ipynb ├── loss_curve.png ├── mmdet_inference_colab.ipynb ├── obbdet_show.jpg └── webcam_demo.py ├── docker └── Dockerfile ├── docs ├── Makefile ├── api.rst ├── changelog.md ├── compatibility.md ├── conf.py ├── config.md ├── getting_started.md ├── index.rst ├── install.md ├── make.bat ├── model_zoo.md ├── oriented_model_starting.md ├── projects.md ├── robustness_benchmarking.md └── tutorials │ ├── data_pipeline.md │ ├── finetune.md │ ├── new_dataset.md │ └── new_modules.md ├── figs ├── module.png ├── motivation.png └── rotate.png ├── mmdet ├── VERSION ├── __init__.py ├── apis │ ├── __init__.py │ ├── inference.py │ ├── obb │ │ ├── __init__.py │ │ └── huge_img_inference.py │ ├── test.py │ └── train.py ├── core │ ├── __init__.py │ ├── anchor │ │ ├── __init__.py │ │ ├── anchor_generator.py │ │ ├── builder.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ └── theta0_anchor_generator.py │ │ ├── point_generator.py │ │ └── utils.py │ ├── bbox │ │ ├── __init__.py │ │ ├── assigners │ │ │ ├── __init__.py │ │ │ ├── approx_max_iou_assigner.py │ │ │ ├── assign_result.py │ │ │ ├── atss_assigner.py │ │ │ ├── base_assigner.py │ │ │ ├── center_region_assigner.py │ │ │ ├── max_iou_assigner.py │ │ │ ├── obb2hbb_max_iou_assigner.py │ │ │ └── point_assigner.py │ │ ├── builder.py │ │ ├── coder │ │ │ ├── __init__.py │ │ │ ├── base_bbox_coder.py │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── gliding_vertex_coders.py │ │ │ │ ├── hbb2obb_delta_xywht_coder.py │ │ │ │ ├── midpoint_offset_coder.py │ │ │ │ └── obb2obb_delta_xywht_coder.py │ │ │ ├── pseudo_bbox_coder.py │ │ │ └── tblr_bbox_coder.py │ │ ├── demodata.py │ │ ├── iou_calculators │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ ├── iou2d_calculator.py │ │ │ └── obb │ │ │ │ ├── __init__.py │ │ │ │ └── obbiou_calculator.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ ├── base_sampler.py │ │ │ ├── combined_sampler.py │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── obb_base_sampler.py │ │ │ │ ├── obb_ohem_sampler.py │ │ │ │ ├── obb_random_sampler.py │ │ │ │ └── obb_sampling_result.py │ │ │ ├── ohem_sampler.py │ │ │ ├── pseudo_sampler.py │ │ │ ├── random_sampler.py │ │ │ ├── sampling_result.py │ │ │ └── score_hlr_sampler.py │ │ ├── transforms.py │ │ └── transforms_obb │ │ │ ├── __init__.py │ │ │ ├── form.py │ │ │ ├── mapping.py │ │ │ └── misc.py │ ├── evaluation │ │ ├── __init__.py │ │ ├── bbox_overlaps.py │ │ ├── class_names.py │ │ ├── eval_hooks.py │ │ ├── mean_ap.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── obb_mean_ap.py │ │ │ └── obb_recall.py │ │ └── recall.py │ ├── fp16 │ │ ├── __init__.py │ │ ├── decorators.py │ │ ├── hooks.py │ │ └── utils.py │ ├── hooks │ │ ├── __init__.py │ │ └── random_fp.py │ ├── mask │ │ ├── __init__.py │ │ ├── mask_target.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ └── obb_mask_target.py │ │ ├── structures.py │ │ └── utils.py │ ├── post_processing │ │ ├── __init__.py │ │ ├── bbox_nms.py │ │ ├── merge_augs.py │ │ └── obb │ │ │ ├── __init__.py │ │ │ ├── obb_merge_augs.py │ │ │ └── obb_nms.py │ └── utils │ │ ├── __init__.py │ │ ├── dist_utils.py │ │ └── misc.py ├── datasets │ ├── __init__.py │ ├── builder.py │ ├── cityscapes.py │ ├── coco.py │ ├── custom.py │ ├── dataset_wrappers.py │ ├── deepfashion.py │ ├── lvis.py │ ├── obb │ │ ├── __init__.py │ │ ├── dior.py │ │ ├── dota.py │ │ ├── hrsc.py │ │ ├── isaid.py 
│ │ ├── msra_td500.py │ │ └── rctw17.py │ ├── pipelines │ │ ├── __init__.py │ │ ├── auto_augment.py │ │ ├── compose.py │ │ ├── formating.py │ │ ├── instaboost.py │ │ ├── loading.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── dota.py │ │ │ └── misc.py │ │ ├── test_time_aug.py │ │ └── transforms.py │ ├── samplers │ │ ├── __init__.py │ │ ├── distributed_sampler.py │ │ └── group_sampler.py │ ├── voc.py │ ├── wider_face.py │ └── xml_style.py ├── models │ ├── __init__.py │ ├── backbones │ │ ├── __init__.py │ │ ├── arc_resnet.py │ │ ├── detectors_resnet.py │ │ ├── detectors_resnext.py │ │ ├── hourglass.py │ │ ├── hrnet.py │ │ ├── modules │ │ │ ├── __init__.py │ │ │ ├── adaptive_rotated_conv.py │ │ │ ├── routing_function.py │ │ │ └── weight_init.py │ │ ├── regnet.py │ │ ├── res2net.py │ │ ├── resnet.py │ │ ├── resnext.py │ │ └── ssd_vgg.py │ ├── builder.py │ ├── dense_heads │ │ ├── __init__.py │ │ ├── anchor_free_head.py │ │ ├── anchor_head.py │ │ ├── atss_head.py │ │ ├── base_dense_head.py │ │ ├── fcos_head.py │ │ ├── fovea_head.py │ │ ├── free_anchor_retina_head.py │ │ ├── fsaf_head.py │ │ ├── ga_retina_head.py │ │ ├── ga_rpn_head.py │ │ ├── gfl_head.py │ │ ├── guided_anchor_head.py │ │ ├── nasfcos_head.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── obb_anchor_free_head.py │ │ │ ├── obb_anchor_head.py │ │ │ ├── obb_fcos_head.py │ │ │ ├── obb_retina_head.py │ │ │ ├── odm_head.py │ │ │ ├── oriented_rpn_head.py │ │ │ └── s2a_head.py │ │ ├── pisa_retinanet_head.py │ │ ├── pisa_ssd_head.py │ │ ├── reppoints_head.py │ │ ├── retina_head.py │ │ ├── retina_sepbn_head.py │ │ ├── rpn_head.py │ │ ├── rpn_test_mixin.py │ │ └── ssd_head.py │ ├── detectors │ │ ├── __init__.py │ │ ├── atss.py │ │ ├── base.py │ │ ├── cascade_rcnn.py │ │ ├── fast_rcnn.py │ │ ├── faster_rcnn.py │ │ ├── fcos.py │ │ ├── fovea.py │ │ ├── fsaf.py │ │ ├── gfl.py │ │ ├── grid_rcnn.py │ │ ├── htc.py │ │ ├── mask_rcnn.py │ │ ├── mask_scoring_rcnn.py │ │ ├── nasfcos.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── faster_rcnn_obb.py │ │ │ ├── fcos_obb.py │ │ │ ├── gliding_vertex.py │ │ │ ├── obb_base.py │ │ │ ├── obb_rpn.py │ │ │ ├── obb_single_stage.py │ │ │ ├── obb_test_mixins.py │ │ │ ├── obb_two_stage.py │ │ │ ├── oriented_rcnn.py │ │ │ ├── retinanet_obb.py │ │ │ ├── roi_transformer.py │ │ │ └── s2anet.py │ │ ├── point_rend.py │ │ ├── reppoints_detector.py │ │ ├── retinanet.py │ │ ├── rpn.py │ │ ├── single_stage.py │ │ └── two_stage.py │ ├── losses │ │ ├── __init__.py │ │ ├── accuracy.py │ │ ├── ae_loss.py │ │ ├── balanced_l1_loss.py │ │ ├── cross_entropy_loss.py │ │ ├── focal_loss.py │ │ ├── gaussian_focal_loss.py │ │ ├── gfocal_loss.py │ │ ├── ghm_loss.py │ │ ├── iou_loss.py │ │ ├── mse_loss.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ └── poly_iou_loss.py │ │ ├── pisa_loss.py │ │ ├── smooth_l1_loss.py │ │ └── utils.py │ ├── necks │ │ ├── __init__.py │ │ ├── bfp.py │ │ ├── fpn.py │ │ ├── fpn_carafe.py │ │ ├── hrfpn.py │ │ ├── nas_fpn.py │ │ ├── nasfcos_fpn.py │ │ ├── pafpn.py │ │ └── rfp.py │ ├── roi_heads │ │ ├── __init__.py │ │ ├── base_roi_head.py │ │ ├── bbox_heads │ │ │ ├── __init__.py │ │ │ ├── bbox_head.py │ │ │ ├── convfc_bbox_head.py │ │ │ ├── double_bbox_head.py │ │ │ └── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── gv_bbox_head.py │ │ │ │ ├── obb_convfc_bbox_head.py │ │ │ │ ├── obb_double_bbox_head.py │ │ │ │ └── obbox_head.py │ │ ├── cascade_roi_head.py │ │ ├── double_roi_head.py │ │ ├── dynamic_roi_head.py │ │ ├── grid_roi_head.py │ │ ├── htc_roi_head.py │ │ ├── mask_heads │ │ │ ├── __init__.py │ │ │ ├── 
coarse_mask_head.py │ │ │ ├── fcn_mask_head.py │ │ │ ├── fused_semantic_head.py │ │ │ ├── grid_head.py │ │ │ ├── htc_mask_head.py │ │ │ ├── mask_point_head.py │ │ │ ├── maskiou_head.py │ │ │ └── obb │ │ │ │ ├── __init__.py │ │ │ │ └── obb_fcn_mask_head.py │ │ ├── mask_scoring_roi_head.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── gv_ratio_roi_head.py │ │ │ ├── obb_base_roi_head.py │ │ │ ├── obb_double_roi_head.py │ │ │ ├── obb_standard_roi_head.py │ │ │ ├── obb_test_mixins.py │ │ │ └── roitrans_roi_head.py │ │ ├── pisa_roi_head.py │ │ ├── point_rend_roi_head.py │ │ ├── roi_extractors │ │ │ ├── __init__.py │ │ │ ├── base_roi_extractor.py │ │ │ ├── generic_roi_extractor.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── hbb_select_level_roi_extractor.py │ │ │ │ ├── obb_base_roi_extractor.py │ │ │ │ └── obb_single_level_roi_extractor.py │ │ │ └── single_level_roi_extractor.py │ │ ├── shared_heads │ │ │ ├── __init__.py │ │ │ └── res_layer.py │ │ ├── standard_roi_head.py │ │ └── test_mixins.py │ └── utils │ │ ├── __init__.py │ │ └── res_layer.py ├── ops │ ├── __init__.py │ ├── box_iou_rotated │ │ ├── __init__.py │ │ ├── box_iou_rotated_wrapper.py │ │ └── src │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ ├── box_iou_rotated_ext.cpp │ │ │ └── box_iou_rotated_utils.h │ ├── context_block.py │ ├── convex │ │ ├── __init__.py │ │ ├── convex_wrapper.py │ │ └── src │ │ │ ├── convex_cpu.cpp │ │ │ ├── convex_cuda.cu │ │ │ └── convex_ext.cpp │ ├── corner_pool │ │ ├── __init__.py │ │ ├── corner_pool.py │ │ └── src │ │ │ └── corner_pool.cpp │ ├── generalized_attention.py │ ├── masked_conv │ │ ├── __init__.py │ │ ├── masked_conv.py │ │ └── src │ │ │ ├── cuda │ │ │ ├── masked_conv2d_cuda.cpp │ │ │ └── masked_conv2d_kernel.cu │ │ │ └── masked_conv2d_ext.cpp │ ├── merge_cells.py │ ├── nms │ │ ├── __init__.py │ │ ├── nms_wrapper.py │ │ └── src │ │ │ ├── cpu │ │ │ └── nms_cpu.cpp │ │ │ ├── cuda │ │ │ ├── nms_cuda.cpp │ │ │ └── nms_kernel.cu │ │ │ └── nms_ext.cpp │ ├── nms_rotated │ │ ├── __init__.py │ │ ├── nms_rotated_wrapper.py │ │ └── src │ │ │ ├── box_iou_rotated_utils.h │ │ │ ├── nms_rotated_cpu.cpp │ │ │ ├── nms_rotated_cuda.cu │ │ │ ├── nms_rotated_ext.cpp │ │ │ ├── poly_nms_cpu.cpp │ │ │ └── poly_nms_cuda.cu │ ├── non_local.py │ ├── orn │ │ ├── __init__.py │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── active_rotating_filter.py │ │ │ ├── rotation_invariant_encoding.py │ │ │ └── rotation_invariant_pooling.py │ │ ├── modules │ │ │ ├── ORConv.py │ │ │ └── __init__.py │ │ └── src │ │ │ ├── ActiveRotatingFilter.h │ │ │ ├── RotationInvariantEncoding.h │ │ │ ├── cpu │ │ │ ├── ActiveRotatingFilter_cpu.cpp │ │ │ ├── RotationInvariantEncoding_cpu.cpp │ │ │ └── vision.h │ │ │ ├── cuda │ │ │ ├── ActiveRotatingFilter_cuda.cu │ │ │ ├── RotationInvariantEncoding_cuda.cu │ │ │ └── vision.h │ │ │ └── vision.cpp │ ├── plugin.py │ ├── point_sample.py │ ├── roi_align │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── roi_align.py │ │ └── src │ │ │ ├── cpu │ │ │ └── roi_align_v2.cpp │ │ │ ├── cuda │ │ │ ├── roi_align_kernel.cu │ │ │ └── roi_align_kernel_v2.cu │ │ │ └── roi_align_ext.cpp │ ├── roi_align_rotated │ │ ├── __init__.py │ │ ├── roi_align_rotated.py │ │ └── src │ │ │ ├── roi_align_rotated_cpu.cpp │ │ │ ├── roi_align_rotated_cuda.cu │ │ │ ├── roi_align_rotated_ext.cpp │ │ │ └── temp │ │ │ ├── roi_align_rotated_cpu.cpp │ │ │ ├── roi_align_rotated_cuda.cu │ │ │ └── roi_align_rotated_ext.cpp │ ├── roi_pool │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── roi_pool.py │ │ └── src │ │ │ ├── 
cuda │ │ │ └── roi_pool_kernel.cu │ │ │ └── roi_pool_ext.cpp ├── sigmoid_focal_loss │ ├── __init__.py │ ├── sigmoid_focal_loss.py │ └── src │ │ ├── cuda │ │ │ └── sigmoid_focal_loss_cuda.cu │ │ └── sigmoid_focal_loss_ext.cpp ├── utils │ ├── __init__.py │ └── src │ │ └── compiling_info.cpp └── wrappers.py └── utils │ ├── __init__.py │ ├── collect_env.py │ ├── contextmanagers.py │ ├── logger.py │ ├── profiling.py │ └── util_mixins.py ├── pytest.ini ├── requirements.txt ├── requirements ├── build.txt ├── docs.txt ├── optional.txt ├── readthedocs.txt ├── runtime.txt └── tests.txt ├── setup.py ├── tests ├── async_benchmark.py ├── test_anchor.py ├── test_assigner.py ├── test_async.py ├── test_backbone.py ├── test_config.py ├── test_dataset.py ├── test_forward.py ├── test_fp16.py ├── test_heads.py ├── test_losses.py ├── test_masks.py ├── test_necks.py ├── test_ops │ ├── test_corner_pool.py │ ├── test_merge_cells.py │ ├── test_nms.py │ ├── test_soft_nms.py │ └── test_wrappers.py ├── test_pipelines │ ├── test_formatting.py │ ├── test_loading.py │ ├── test_models_aug_test.py │ └── test_transform.py ├── test_pisa_heads.py ├── test_roi_extractor.py └── test_sampler.py └── tools ├── analyze_logs.py ├── benchmark.py ├── browse_dataset.py ├── coco_error_analysis.py ├── convert_datasets ├── cityscapes.py └── pascal_voc.py ├── detectron2pytorch.py ├── dist_test.sh ├── dist_train.sh ├── fuse_conv_bn.py ├── get_flops.py ├── print_config.py ├── publish_model.py ├── pytorch2onnx.py ├── regnet2mmdet.py ├── robustness_eval.py ├── slurm_test.sh ├── slurm_train.sh ├── test.py ├── test_robustness.py ├── train.py └── upgrade_model_version.py
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
blank_issues_enabled: false
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Describe the feature**

**Motivation**
A clear and concise description of the motivation of the feature.
Ex1. It is inconvenient when [....].
Ex2. There is a recent paper [....], which is very helpful for [....].

**Related resources**
If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.

**Additional context**
Add any other context or screenshots about the feature request here.
If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/general_questions.md:
--------------------------------------------------------------------------------
---
name: General questions
about: Ask general questions to get help
title: ''
labels: ''
assignees: ''

---
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
[submodule "BboxToolkit"]
    path = BboxToolkit
    url = git@github.com:jbwang1997/BboxToolkit.git
--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
[isort]
line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmdet
known_third_party = PIL,asynctest,cityscapesscripts,cv2,matplotlib,mmcv,numpy,onnx,pycocotools,pytest,robustness_eval,roi_align,roi_pool,seaborn,six,terminaltables,torch,torchvision
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
repos:
  - repo: https://gitlab.com/pycqa/flake8.git
    rev: 3.8.0
    hooks:
      - id: flake8
  - repo: https://github.com/asottile/seed-isort-config
    rev: v2.1.0
    hooks:
      - id: seed-isort-config
  - repo: https://github.com/timothycrosley/isort
    rev: 4.3.21
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.29.0
    hooks:
      - id: yapf
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.5.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: end-of-file-fixer
      - id: requirements-txt-fixer
      - id: double-quote-string-fixer
      - id: check-merge-conflict
      - id: fix-encoding-pragma
        args: ["--remove"]
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
version: 2

python:
  version: 3.7
  install:
    - requirements: requirements/docs.txt
    - requirements: requirements/readthedocs.txt
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
[style]
BASED_ON_STYLE = pep8
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
--------------------------------------------------------------------------------
/configs/_base_/datasets/lvis_instance.py:
--------------------------------------------------------------------------------
_base_ = 'coco_instance.py'
dataset_type = 'LVISDataset'
data_root = 'data/lvis/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v0.5_train.json',
            img_prefix=data_root + 'train2017/')),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])
--------------------------------------------------------------------------------
/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_1x.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_20e.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 19])
total_epochs = 20
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_2x.py:
--------------------------------------------------------------------------------
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
total_epochs = 24
--------------------------------------------------------------------------------
/configs/albu_example/README.md:
--------------------------------------------------------------------------------
## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Download |
|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:--------:|
| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) |
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))
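The per-model configs in this repository are thin wrappers around the `_base_` fragments shown above: a child file names its parents in `_base_` and overrides only the keys that differ, with dict options merged recursively into the inherited settings. As a hedged illustration only (this is not a file in the repository, and the batch-size values are hypothetical), one of the listed configs could be extended with the decay points from `schedule_2x.py` like this:

```python
# Illustrative sketch only -- not a file in this repository.
# It reuses the _base_ inheritance mechanism shown by the configs above.
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'

# Dict options are merged key-by-key into the inherited config,
# so only the fields that change need to be restated.
optimizer = dict(lr=0.01)          # override the 0.02 set in schedule_1x.py
lr_config = dict(step=[16, 22])    # decay points taken from schedule_2x.py
total_epochs = 24                  # train for 24 epochs instead of 12

# Hypothetical smaller per-GPU batch for a memory-constrained setup.
data = dict(samples_per_gpu=1, workers_per_gpu=1)
```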
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
total_epochs = 20
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='CascadeRCNN',
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py:
--------------------------------------------------------------------------------
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    type='CascadeRCNN',
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
--------------------------------------------------------------------------------
/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='DeformRoIPoolingPack',
                out_size=7,
                out_channels=256,
                no_trans=False,
                group_size=1,
                trans_std=0.1),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deformable_groups=4, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='ModulatedDeformRoIPoolingPack',
                out_size=7,
                out_channels=256,
                no_trans=False,
                group_size=1,
                trans_std=0.1),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
--------------------------------------------------------------------------------
/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py:
--------------------------------------------------------------------------------
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
-------------------------------------------------------------------------------- /configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | roi_head=dict( 8 | bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) 9 | # runtime settings 10 | total_epochs = 15 11 | -------------------------------------------------------------------------------- /configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | type='DetectoRS_ResNet', 10 | conv_cfg=dict(type='ConvAWS'), 11 | output_img=True), 12 | neck=dict( 13 | type='RFP', 14 | rfp_steps=2, 15 | aspp_out_channels=64, 16 | aspp_dilations=(1, 3, 6, 1), 17 | rfp_backbone=dict( 18 | rfp_inplanes=256, 19 | type='DetectoRS_ResNet', 20 | depth=50, 21 | num_stages=4, 22 | out_indices=(0, 1, 2, 3), 23 | frozen_stages=1, 24 | norm_cfg=dict(type='BN', requires_grad=True), 25 | norm_eval=True, 26 | conv_cfg=dict(type='ConvAWS'), 27 | pretrained='torchvision://resnet50', 28 | style='pytorch'))) 29 | -------------------------------------------------------------------------------- /configs/detectors/cascade_rcnn_r50_sac_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | type='DetectoRS_ResNet', 10 | conv_cfg=dict(type='ConvAWS'), 11 | sac=dict(type='SAC', use_deform=True), 12 | stage_with_sac=(False, True, True, True))) 13 | -------------------------------------------------------------------------------- 
/configs/detectors/htc_r50_rfp_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | backbone=dict( 5 | type='DetectoRS_ResNet', 6 | conv_cfg=dict(type='ConvAWS'), 7 | output_img=True), 8 | neck=dict( 9 | type='RFP', 10 | rfp_steps=2, 11 | aspp_out_channels=64, 12 | aspp_dilations=(1, 3, 6, 1), 13 | rfp_backbone=dict( 14 | rfp_inplanes=256, 15 | type='DetectoRS_ResNet', 16 | depth=50, 17 | num_stages=4, 18 | out_indices=(0, 1, 2, 3), 19 | frozen_stages=1, 20 | norm_cfg=dict(type='BN', requires_grad=True), 21 | norm_eval=True, 22 | conv_cfg=dict(type='ConvAWS'), 23 | pretrained='torchvision://resnet50', 24 | style='pytorch'))) 25 | -------------------------------------------------------------------------------- /configs/detectors/htc_r50_sac_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | backbone=dict( 5 | type='DetectoRS_ResNet', 6 | conv_cfg=dict(type='ConvAWS'), 7 | sac=dict(type='SAC', use_deform=True), 8 | stage_with_sac=(False, True, True, True))) 9 | -------------------------------------------------------------------------------- /configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | type='DoubleHeadRoIHead', 5 | reg_roi_scale_factor=1.3, 6 | bbox_head=dict( 7 | _delete_=True, 8 | type='DoubleConvFCBBoxHead', 9 | num_convs=4, 10 | num_fcs=2, 11 | in_channels=256, 12 | conv_out_channels=1024, 13 | fc_out_channels=1024, 14 | roi_feat_size=7, 15 | num_classes=80, 16 | bbox_coder=dict( 17 | type='DeltaXYWHBBoxCoder', 18 | target_means=[0., 0., 0., 0.], 19 | target_stds=[0.1, 0.1, 0.2, 0.2]), 20 | reg_class_agnostic=False, 21 | loss_cls=dict( 22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), 23 | loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) 24 | -------------------------------------------------------------------------------- /configs/dynamic_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training 2 | 3 | ## Introduction 4 | 5 | ``` 6 | @article{DynamicRCNN, 7 | author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen}, 8 | title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training}, 9 | journal = {arXiv preprint arXiv:2004.06002}, 10 | year = {2020} 11 | } 12 | ``` 13 | 14 | ## Results and Models 15 | 16 | | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download | 17 | |:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:--------:| 18 | | R-50 | pytorch | 1x | 3.8 | | 38.9 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) | [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) | 19 | -------------------------------------------------------------------------------- /configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict( 6 | type='GeneralizedAttention', 7 | spatial_range=-1, 8 | num_heads=8, 9 | attention_type='0010', 10 | kv_stride=2), 11 | stages=(False, False, True, True), 12 | position='after_conv2') 13 | ])) 14 | -------------------------------------------------------------------------------- /configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | plugins=[ 5 | dict( 6 | cfg=dict( 7 | type='GeneralizedAttention', 8 | spatial_range=-1, 9 | num_heads=8, 10 | attention_type='0010', 11 | kv_stride=2), 12 | stages=(False, False, True, True), 13 | position='after_conv2') 14 | ], 15 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 16 | stage_with_dcn=(False, True, True, True))) 17 | -------------------------------------------------------------------------------- /configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict( 6 | type='GeneralizedAttention', 7 | spatial_range=-1, 8 | num_heads=8, 9 | attention_type='1111', 10 | kv_stride=2), 11 | stages=(False, False, True, True), 12 | position='after_conv2') 13 | ])) 14 | -------------------------------------------------------------------------------- /configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | plugins=[ 5 | dict( 6 | cfg=dict( 7 | type='GeneralizedAttention', 8 | spatial_range=-1, 9 | num_heads=8, 10 | attention_type='1111', 11 | kv_stride=2), 12 | stages=(False, False, True, True), 13 | position='after_conv2') 14 | ], 15 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 16 | stage_with_dcn=(False, True, True, True))) 17 | -------------------------------------------------------------------------------- /configs/fast_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Fast R-CNN 2 | 3 | ## Introduction 4 | ``` 5 | @inproceedings{girshick2015fast, 6 | title={Fast r-cnn}, 7 | author={Girshick, Ross}, 8 | booktitle={Proceedings of the IEEE international conference on computer vision}, 9 | year={2015} 10 | } 11 | ``` 12 | 13 | ## Results and models 14 | -------------------------------------------------------------------------------- /configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | 
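The results tables in these READMEs (for example the Dynamic R-CNN entry above) pair each config with a released checkpoint. Below is a minimal inference sketch, assuming mmdet and mmcv are installed and the checkpoint has been downloaded locally; the image path is a placeholder.

```
# Sketch only (not part of the repo): run inference with a config plus the
# checkpoint linked in the Dynamic R-CNN results table above. The local file
# names are placeholders for the downloaded artifacts.
from mmdet.apis import init_detector, inference_detector

config = 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py'
checkpoint = 'dynamic_rcnn_r50_fpn_1x-62a3f276.pth'   # downloaded from the table above

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')        # placeholder image path
```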
-------------------------------------------------------------------------------- /configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | classes = ('person', 'bicycle', 'car') 3 | data = dict( 4 | train=dict(classes=classes), 5 | val=dict(classes=classes), 6 | test=dict(classes=classes)) 7 | # TODO: Update model url after bumping to V2.0 8 | load_from = 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth' # noqa 9 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | classes = ('person', ) 3 | data = dict( 4 | train=dict(classes=classes), 5 | val=dict(classes=classes), 6 | 
test=dict(classes=classes)) 7 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | train_cfg = dict(rcnn=dict(sampler=dict(type='OHEMSampler'))) 3 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | test_cfg = dict( 8 | rcnn=dict( 9 | score_thr=0.05, 10 | nms=dict(type='soft_nms', iou_thr=0.5), 11 | max_per_img=100)) 12 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | 
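The class-subset configs above (`*_coco-person.py`, `*_coco-person-bicycle-car.py`) override only the dataset `classes`; everything else is inherited from the base. As an illustration of the same pattern, here is a hypothetical config for a different subset (not a file in this repo):

```
# Illustrative only (not a file in this repo): the same class-subset pattern as
# faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py, for a hypothetical subset.
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
classes = ('cat', 'dog')  # hypothetical subset; any COCO class names work
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))
```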
-------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) 3 | -------------------------------------------------------------------------------- /configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py'] 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict( 3 | 
pretrained='torchvision://resnet101', 4 | backbone=dict(depth=101), 5 | bbox_head=dict( 6 | with_deform=True, 7 | norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) 8 | # learning policy 9 | lr_config = dict(step=[16, 22]) 10 | total_epochs = 24 11 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | with_deform=True, 5 | norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) 6 | # learning policy 7 | lr_config = dict(step=[16, 22]) 8 | total_epochs = 24 9 | optimizer_config = dict( 10 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 11 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | with_deform=True, 5 | norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) 6 | img_norm_cfg = dict( 7 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations', with_bbox=True), 11 | dict( 12 | type='Resize', 13 | img_scale=[(1333, 640), (1333, 800)], 14 | multiscale_mode='value', 15 | keep_ratio=True), 16 | dict(type='RandomFlip', flip_ratio=0.5), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size_divisor=32), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 21 | ] 22 | data = dict(train=dict(pipeline=train_pipeline)) 23 | # learning policy 24 | lr_config = dict(step=[16, 22]) 25 | total_epochs = 24 26 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 
4 | -------------------------------------------------------------------------------- /configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | _delete_=True, 5 | type='FreeAnchorRetinaHead', 6 | num_classes=80, 7 | in_channels=256, 8 | stacked_convs=4, 9 | feat_channels=256, 10 | anchor_generator=dict( 11 | type='AnchorGenerator', 12 | octave_base_scale=4, 13 | scales_per_octave=3, 14 | ratios=[0.5, 1.0, 2.0], 15 | strides=[8, 16, 32, 64, 128]), 16 | bbox_coder=dict( 17 | type='DeltaXYWHBBoxCoder', 18 | target_means=[.0, .0, .0, .0], 19 | target_stds=[0.1, 0.1, 0.2, 0.2]), 20 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75))) 21 | optimizer_config = dict( 22 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 23 | -------------------------------------------------------------------------------- /configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /configs/fsaf/fsaf_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fsaf_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fsaf_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. 
/ 16), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. / 4), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. / 16), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. 
/ 4), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. 
/ 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | type='ResNet', 6 | depth=101, 7 | num_stages=4, 8 | out_indices=(0, 1, 2, 3), 9 | frozen_stages=1, 10 | norm_cfg=dict(type='BN', requires_grad=True), 11 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 12 | stage_with_dcn=(False, True, True, True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | -------------------------------------------------------------------------------- /configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | type='ResNet', 6 | depth=101, 7 | num_stages=4, 8 | out_indices=(0, 1, 2, 3), 9 | frozen_stages=1, 10 | norm_cfg=dict(type='BN', requires_grad=True), 11 | norm_eval=True, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | # multi-scale training 6 | img_norm_cfg = dict( 7 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations', with_bbox=True), 11 | dict( 12 | type='Resize', 13 | img_scale=[(1333, 480), (1333, 800)], 14 | multiscale_mode='range', 15 | keep_ratio=True), 16 | dict(type='RandomFlip', flip_ratio=0.5), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size_divisor=32), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 21 | ] 22 | data = dict(train=dict(pipeline=train_pipeline)) 23 | -------------------------------------------------------------------------------- /configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | type='GFL', 4 | pretrained='open-mmlab://resnext101_32x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=32, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | frozen_stages=1, 13 | norm_cfg=dict(type='BN', requires_grad=True), 14 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 15 | stage_with_dcn=(False, False, True, True), 16 | norm_eval=True, 17 | style='pytorch')) 18 | -------------------------------------------------------------------------------- /configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | type='GFL', 4 | pretrained='open-mmlab://resnext101_32x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=32, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | frozen_stages=1, 13 | 
norm_cfg=dict(type='BN', requires_grad=True), 14 | norm_eval=True, 15 | style='pytorch')) 16 | -------------------------------------------------------------------------------- /configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | loss_cls=dict( 5 | _delete_=True, 6 | type='GHMC', 7 | bins=30, 8 | momentum=0.75, 9 | use_sigmoid=True, 10 | loss_weight=1.0), 11 | loss_bbox=dict( 12 | _delete_=True, 13 | type='GHMR', 14 | mu=0.02, 15 | bins=10, 16 | momentum=0.7, 17 | loss_weight=10.0))) 18 | optimizer_config = dict( 19 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 20 | -------------------------------------------------------------------------------- /configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnet50_gn_ws', 6 | backbone=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 7 | neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 8 | roi_head=dict( 9 | bbox_head=dict( 10 | type='Shared4Conv1FCBBoxHead', 11 | conv_out_channels=256, 12 | conv_cfg=conv_cfg, 13 | norm_cfg=norm_cfg))) 14 | -------------------------------------------------------------------------------- /configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', 6 | backbone=dict( 7 | type='ResNeXt', 8 | depth=101, 9 | groups=32, 10 | base_width=4, 11 | num_stages=4, 12 | out_indices=(0, 1, 2, 3), 13 | frozen_stages=1, 14 | style='pytorch', 15 | conv_cfg=conv_cfg, 16 | norm_cfg=norm_cfg)) 17 | -------------------------------------------------------------------------------- /configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws', 6 | backbone=dict( 7 | type='ResNeXt', 8 | depth=50, 9 | groups=32, 10 | base_width=4, 11 | num_stages=4, 12 | out_indices=(0, 1, 2, 3), 13 | frozen_stages=1, 14 | style='pytorch', 15 | conv_cfg=conv_cfg, 16 | norm_cfg=norm_cfg)) 17 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnet50_gn_ws', 6 | backbone=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 7 | neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 8 | roi_head=dict( 9 | bbox_head=dict( 10 | type='Shared4Conv1FCBBoxHead', 11 | conv_out_channels=256, 12 | conv_cfg=conv_cfg, 13 | norm_cfg=norm_cfg), 14 | mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) 15 | # learning policy 16 | lr_config = dict(step=[16, 22]) 17 | total_epochs = 24 18 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 
4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # model settings 3 | conv_cfg = dict(type='ConvWS') 4 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 5 | model = dict( 6 | pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', 7 | backbone=dict( 8 | type='ResNeXt', 9 | depth=101, 10 | groups=32, 11 | base_width=4, 12 | num_stages=4, 13 | out_indices=(0, 1, 2, 3), 14 | frozen_stages=1, 15 | style='pytorch', 16 | conv_cfg=conv_cfg, 17 | norm_cfg=norm_cfg)) 18 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # model settings 3 | conv_cfg = dict(type='ConvWS') 4 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 5 | model = dict( 6 | pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws', 7 | backbone=dict( 8 | type='ResNeXt', 9 | depth=50, 10 | groups=32, 11 | base_width=4, 12 | num_stages=4, 13 | out_indices=(0, 1, 2, 3), 14 | frozen_stages=1, 15 | style='pytorch', 16 | conv_cfg=conv_cfg, 17 | norm_cfg=norm_cfg)) 18 | -------------------------------------------------------------------------------- /configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_gn', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 3 | model = dict( 4 | pretrained='open-mmlab://contrib/resnet50_gn', 5 | backbone=dict(norm_cfg=norm_cfg), 6 | neck=dict(norm_cfg=norm_cfg), 7 | roi_head=dict( 8 | bbox_head=dict( 9 | type='Shared4Conv1FCBBoxHead', 10 | conv_out_channels=256, 11 | norm_cfg=norm_cfg), 12 | mask_head=dict(norm_cfg=norm_cfg))) 
13 | # learning policy 14 | lr_config = dict(step=[16, 22]) 15 | total_epochs = 24 16 | -------------------------------------------------------------------------------- /configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = ['../grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py'] 2 | # learning policy 3 | lr_config = dict( 4 | policy='step', 5 | warmup='linear', 6 | warmup_iters=500, 7 | warmup_ratio=0.001, 8 | step=[8, 11]) 9 | checkpoint_config = dict(interval=1) 10 | # runtime settings 11 | total_epochs = 12 12 | -------------------------------------------------------------------------------- /configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | # optimizer 14 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 15 | optimizer_config = dict(grad_clip=None) 16 | # learning policy 17 | lr_config = dict( 18 | policy='step', 19 | warmup='linear', 20 | warmup_iters=3665, 21 | warmup_ratio=1.0 / 80, 22 | step=[17, 23]) 23 | total_epochs = 25 24 | -------------------------------------------------------------------------------- /configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | roi_head=dict( 5 | bbox_roi_extractor=dict( 6 | type='GenericRoIExtractor', 7 | aggregation='sum', 8 | roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), 9 | out_channels=256, 10 | featmap_strides=[4, 8, 16, 32], 11 | pre_cfg=dict( 12 | type='ConvModule', 13 | in_channels=256, 14 | out_channels=256, 15 | kernel_size=5, 16 | padding=2, 17 | inplace=False, 18 | ), 19 | post_cfg=dict( 20 | type='GeneralizedAttention', 21 | in_channels=256, 22 | spatial_range=-1, 23 | num_heads=6, 24 | 
attention_type='0100', 25 | kv_stride=2)))) 26 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://detectron2/resnet101_caffe', 5 | backbone=dict(depth=101)) 6 | 
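Many of the `2x`/`3x` variants in this tree differ from their `1x` base only in the learning-rate steps and epoch count: `1x` is 12 epochs with LR drops at epochs 8 and 11 (see the grid_rcnn 1x config above), `2x` is 24 epochs, and `3x` is 36. A minimal sketch of that recurring override, with a placeholder base path:

```
# Sketch only (not a file in this repo): the recurring "2x schedule" override,
# mirroring e.g. fast_rcnn_r50_fpn_2x_coco.py above. The base path below is a
# placeholder for whichever 1x config is being extended.
_base_ = './some_detector_r50_fpn_1x_coco.py'  # placeholder
# learning policy
lr_config = dict(step=[16, 22])  # drop LR at epochs 16 and 22
total_epochs = 24                # 2x = 24 epochs (1x = 12, 3x = 36)
```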
-------------------------------------------------------------------------------- /configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w40', 5 | backbone=dict( 6 | type='HRNet', 7 | extra=dict( 8 | stage2=dict(num_channels=(40, 80)), 9 | stage3=dict(num_channels=(40, 80, 160)), 10 | stage4=dict(num_channels=(40, 80, 160, 320)))), 11 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 12 | -------------------------------------------------------------------------------- /configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w40', 5 | backbone=dict( 6 | type='HRNet', 7 | extra=dict( 8 | stage2=dict(num_channels=(40, 80)), 9 | stage3=dict(num_channels=(40, 80, 160)), 10 | stage4=dict(num_channels=(40, 80, 160, 
320)))), 11 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 12 | -------------------------------------------------------------------------------- /configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/htc_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w32_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /configs/hrnet/htc_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w32_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/htc_hrnetv2p_w40_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w40_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/htc/htc_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | # learning policy 4 | lr_config = dict(step=[16, 19]) 5 | total_epochs = 20 6 | -------------------------------------------------------------------------------- /configs/htc/htc_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | total_epochs = 20 5 | -------------------------------------------------------------------------------- /configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | data = dict(samples_per_gpu=1, workers_per_gpu=1) 16 | # learning policy 17 | lr_config = dict(step=[16, 19]) 18 | total_epochs = 20 19 | 
-------------------------------------------------------------------------------- /configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | data = dict(samples_per_gpu=1, workers_per_gpu=1) 16 | # learning policy 17 | lr_config = dict(step=[16, 19]) 18 | total_epochs = 20 19 | -------------------------------------------------------------------------------- /configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | bbox_head=dict( 8 | type='RetinaHead', 9 | anchor_generator=dict( 10 | type='LegacyAnchorGenerator', 11 | center_offset=0.5, 12 | octave_base_scale=4, 13 | scales_per_octave=3, 14 | ratios=[0.5, 1.0, 2.0], 15 | strides=[8, 16, 32, 64, 128]), 16 | bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), 17 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 18 | -------------------------------------------------------------------------------- 
/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | neck=[ 5 | dict( 6 | type='FPN', 7 | in_channels=[256, 512, 1024, 2048], 8 | out_channels=256, 9 | start_level=1, 10 | add_extra_convs='on_input', 11 | num_outs=5), 12 | dict( 13 | type='BFP', 14 | in_channels=256, 15 | num_levels=5, 16 | refine_level=1, 17 | refine_type='non_local') 18 | ], 19 | bbox_head=dict( 20 | loss_bbox=dict( 21 | _delete_=True, 22 | type='BalancedL1Loss', 23 | alpha=0.5, 24 | gamma=1.5, 25 | beta=0.11, 26 | loss_weight=1.0))) 27 | -------------------------------------------------------------------------------- /configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | 
pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | img_norm_cfg = dict( 8 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 9 | train_pipeline = [ 10 | dict(type='LoadImageFromFile'), 11 | dict( 12 | type='LoadAnnotations', 13 | with_bbox=True, 14 | with_mask=True, 15 | poly2mask=False), 16 | dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), 17 | dict(type='RandomFlip', flip_ratio=0.5), 18 | dict(type='Normalize', **img_norm_cfg), 19 | dict(type='Pad', size_divisor=32), 20 | dict(type='DefaultFormatBundle'), 21 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), 22 | ] 23 | data = dict(train=dict(pipeline=train_pipeline)) 24 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | type='MaskScoringRCNN', 4 | roi_head=dict( 5 | type='MaskScoringRoIHead', 6 | mask_iou_head=dict( 7 | type='MaskIoUHead', 8 | num_convs=4, 9 | num_fcs=2, 10 | roi_feat_size=14, 11 | in_channels=256, 12 | conv_out_channels=256, 13 | fc_out_channels=1024, 14 | num_classes=80))) 15 | # model training and testing settings 16 | train_cfg = dict(rcnn=dict(mask_thr_binary=0.5)) 17 | 
-------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | type='MaskScoringRCNN', 4 | roi_head=dict( 5 | type='MaskScoringRoIHead', 6 | mask_iou_head=dict( 7 | type='MaskIoUHead', 8 | num_convs=4, 9 | num_fcs=2, 10 | roi_feat_size=14, 11 | in_channels=256, 12 | conv_out_channels=256, 13 | fc_out_channels=1024, 14 | num_classes=80))) 15 | # model training and testing settings 16 | train_cfg = dict(rcnn=dict(mask_thr_binary=0.5)) 17 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/obb/_base_/schedules/schedule_1x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[8, 11]) 11 | total_epochs = 12 12 | -------------------------------------------------------------------------------- /configs/obb/_base_/schedules/schedule_2x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[16, 22]) 
11 | total_epochs = 24 12 | -------------------------------------------------------------------------------- /configs/obb/_base_/schedules/schedule_3x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[24, 33]) 11 | total_epochs = 36 12 | -------------------------------------------------------------------------------- /configs/obb/arc/arc_orcnn_r101fpn1x_ss_dota10_RxFFF_n4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | './orcnn_r101fpn1x_ss_dota10.py', 3 | ] 4 | 5 | model = dict( 6 | pretrained='pretrained/ARC_ResNet101_xFFF_n4.pth', 7 | backbone=dict( 8 | type='ARCResNet', 9 | replace=[ # blocks to replace in each of the four ResNet stages ('x': none in that stage) 10 | ['x'], 11 | ['0', '1', '2', '3'], 12 | ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22'], 13 | ['0', '1', '2'] 14 | ], 15 | kernel_number=4, # matches the '_n4' suffix of the pretrained checkpoint 16 | ) 17 | ) 18 | 19 | optimizer = dict( 20 | paramwise_cfg=dict( 21 | custom_keys={ 22 | 'backbone': dict(lr_mult=0.5) # backbone is trained with half of the base learning rate 23 | } 24 | ) 25 | ) -------------------------------------------------------------------------------- /configs/obb/arc/arc_orcnn_r50fpn1x_ss_dota10_RxFFF_n4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | './orcnn_r50fpn1x_ss_dota10.py', 3 | ] 4 | 5 | model = dict( 6 | pretrained='pretrained/ARC_ResNet50_xFFF_n4.pth', 7 | backbone=dict( 8 | type='ARCResNet', 9 | replace=[ # blocks to replace in each of the four ResNet stages ('x': none in that stage) 10 | ['x'], 11 | ['0', '1', '2', '3'], 12 | ['0', '1', '2', '3', '4', '5'], 13 | ['0', '1', '2'] 14 | ], 15 | kernel_number=4, # matches the '_n4' suffix of the pretrained checkpoint 16 | ) 17 | ) 18 | 19 | optimizer = dict( 20 | paramwise_cfg=dict( 21 | custom_keys={ 22 | 'backbone': dict(lr_mult=0.5) # backbone is trained with half of the base learning rate 23 | } 24 | ) 25 | ) -------------------------------------------------------------------------------- /configs/obb/arc/orcnn_r101fpn1x_ss_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './orcnn_r50fpn1x_ss_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) -------------------------------------------------------------------------------- /configs/obb/arc/orcnn_r50fpn1x_ss_dota10.py: -------------------------------------------------------------------------------- 1 | 2 | _base_ = [ 3 | '../oriented_rcnn/faster_rcnn_orpn_r50_fpn_1x_dota10.py', 4 | ] 5 | 6 | data = dict( 7 | samples_per_gpu=2, 8 | workers_per_gpu=2, 9 | ) 10 | 11 | checkpoint_config = dict(interval=1, max_keep_ckpts=1) -------------------------------------------------------------------------------- /configs/obb/atss_obb/README.md: -------------------------------------------------------------------------------- 1 | # Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection 2 | 3 | 4 | ## Introduction 5 | 6 | The oriented form of the ATSS model.
7 | 8 | ``` 9 | @article{zhang2019bridging, 10 | title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, 11 | author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, 12 | journal = {arXiv preprint arXiv:1912.02424}, 13 | year = {2019} 14 | } 15 | ``` 16 | 17 | To be continued. 18 | -------------------------------------------------------------------------------- /configs/obb/double_heads_obb/README.md: -------------------------------------------------------------------------------- 1 | # Rethinking Classification and Localization for Object Detection 2 | 3 | ## Introduction 4 | 5 | The oriented form of the Double-Head model. 6 | 7 | ``` 8 | @article{wu2019rethinking, 9 | title={Rethinking Classification and Localization for Object Detection}, 10 | author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu}, 11 | year={2019}, 12 | eprint={1904.06493}, 13 | archivePrefix={arXiv}, 14 | primaryClass={cs.CV} 15 | } 16 | ``` 17 | 18 | ## Results and models 19 | To be continued. 20 | -------------------------------------------------------------------------------- /configs/obb/faster_rcnn_obb/faster_rcnn_obb_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_obb_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/faster_rcnn_obb/faster_rcnn_obb_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_obb_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/gliding_vertex/gliding_vertex_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './gliding_vertex_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/gliding_vertex/gliding_vertex_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './gliding_vertex_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/mask_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # [Mask R-CNN](https://arxiv.org/abs/1703.06870) 2 | 3 | ## Introduction 4 | 5 | The oriented form of Mask R-CNN. 6 | 7 | ``` 8 | @article{He_2017, 9 | title={Mask R-CNN}, 10 | journal={2017 IEEE International Conference on Computer Vision (ICCV)}, 11 | publisher={IEEE}, 12 | author={He, Kaiming and Gkioxari, Georgia and Dollar, Piotr and Girshick, Ross}, 13 | year={2017}, 14 | month={Oct} 15 | } 16 | ``` 17 | 18 | ## Datasets 19 | 20 | First, users need to pre-process the iSAID dataset. Please follow the tutorial of [iSAID_Devkit](https://github.com/CAPTAIN-WHU/iSAID_Devkit) to split the images and generate the COCO-format JSON annotation file; a minimal sketch of how the result is referenced from a dataset config is shown below.
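The fragment below is only an illustration of how such a JSON file is typically wired into an MMDetection-style dataset config; all paths and file names are placeholder assumptions, and the oriented pipeline may require a different dataset type and additional transforms.

```python
# Hypothetical sketch: pointing a dataset config at the iSAID_Devkit output.
# Paths and file names are placeholders; adjust them to your local layout.
data_root = 'data/iSAID_patches/'  # assumed output directory of iSAID_Devkit
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='CocoDataset',  # loads the COCO-format JSON produced above
        ann_file=data_root + 'train/instances_train.json',
        img_prefix=data_root + 'train/images/'),
    val=dict(
        type='CocoDataset',
        ann_file=data_root + 'val/instances_val.json',
        img_prefix=data_root + 'val/images/'))
```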
21 | 22 | ## Results and models 23 | To be continued. 24 | -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_1x_ms_rr_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_1x_ms_rr_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn/illustration.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/configs/obb/oriented_rcnn/illustration.jpg -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn/orpn_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './orpn_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/oriented_rcnn_beyond/illustration.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/configs/obb/oriented_rcnn_beyond/illustration.jpg -------------------------------------------------------------------------------- /configs/obb/random_fp/README.md: -------------------------------------------------------------------------------- 1 | # Random False Patches 2 | 3 | Currently, researchers only utilize the patches that contain objects (true patches, TPs) to train models on large-scale aerial images. However, the patches without any object (false patches, FPs) are equally valuable: FPs cover more scenes than TPs, which helps the model suppress false positives. In most cases, though, the number of FPs far exceeds the number of TPs, and training on all TPs and FPs directly would take an unacceptably long time. Thus, to make use of FPs, we add a subset of FPs to the dataset and reshuffle it at each epoch. 4 | 5 | ## Results 6 | 7 | **Note**: the argument `filter_empty` in the split config should be set to `false` so that FP information is kept during the splitting phase; a minimal sketch of the relevant fragment is shown below. 8 | 9 | To be continued.
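Only `filter_empty` itself comes from the note above; every other field name and value in this sketch is an illustrative assumption, and the actual splitting tool may expect a JSON file with its own schema.

```python
# Hypothetical split-config fragment: keep patches that contain no objects.
split_cfg = dict(
    sizes=[1024],          # assumed patch size
    gaps=[200],            # assumed overlap between adjacent patches
    filter_empty=False)    # do not drop empty (false) patches while splitting
```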
-------------------------------------------------------------------------------- /configs/obb/retinanet_obb/retinanet_obb_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/retinanet_obb/retinanet_obb_r101_fpn_2x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r101_fpn_1x_dota10.py' 2 | 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/obb/retinanet_obb/retinanet_obb_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/retinanet_obb/retinanet_obb_r50_fpn_2x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_1x_dota10.py' 2 | 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/obb/roi_transformer/README.md: -------------------------------------------------------------------------------- 1 | # [Learning RoI Transformer for Oriented Object Detection in Aerial Images](https://openaccess.thecvf.com/content_CVPR_2019/papers/Ding_Learning_RoI_Transformer_for_Oriented_Object_Detection_in_Aerial_Images_CVPR_2019_paper.pdf) 2 | 3 | ## Introduction 4 | ``` 5 | @inproceedings{RN8, 6 | author = {Ding, Jian and Xue, Nan and Long, Yang and Xia, Gui-Song and Lu, Qikai}, 7 | title = {Learning RoI Transformer for Oriented Object Detection in Aerial Images}, 8 | publisher = {IEEE}, 9 | DOI = {10.1109/cvpr.2019.00296}, 10 | url = {https://dx.doi.org/10.1109/cvpr.2019.00296}, 11 | type = {Conference Proceedings} 12 | } 13 | ``` 14 | 15 | ## Results and models 16 | 17 | To be continued.
18 | -------------------------------------------------------------------------------- /configs/obb/roi_transformer/faster_rcnn_roitrans_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_roitrans_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/obb/roi_transformer/faster_rcnn_roitrans_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_roitrans_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | neck=dict( 5 | type='PAFPN', 6 | in_channels=[256, 512, 1024, 2048], 7 | out_channels=256, 8 | num_outs=5)) 9 | -------------------------------------------------------------------------------- /configs/pascal_voc/README.md: -------------------------------------------------------------------------------- 1 | ## Results and Models 2 | 3 | | Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download | 4 | |:------------:|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:--------:| 5 | | Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 79.5 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth) &#124; [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/20200623_015208.log.json) | 6 | | RetinaNet | R-50 | pytorch | 1x | 2.1 | - | 77.3 | [model](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) &#124; [log](https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) | 7 | -------------------------------------------------------------------------------- /configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | optimizer_config = dict(grad_clip=None) 9 | # learning policy 10 | # actual epoch = 3 * 3 = 9 (voc0712 repeats the trainval set 3 times) 11 | lr_config = dict(policy='step', step=[3]) 12 | # runtime settings 13 | total_epochs = 4 # actual epoch = 4 * 3 = 12 14 | -------------------------------------------------------------------------------- /configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = dict(bbox_head=dict(num_classes=20)) 6 | # optimizer 7 |
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | optimizer_config = dict(grad_clip=None) 9 | # learning policy 10 | # actual epoch = 3 * 3 = 9 11 | lr_config = dict(policy='step', step=[3]) 12 | # runtime settings 13 | total_epochs = 4 # actual epoch = 4 * 3 = 12 14 | -------------------------------------------------------------------------------- /configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | bbox_head=dict( 5 | type='PISARetinaHead', 6 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 7 | 8 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 9 | -------------------------------------------------------------------------------- /configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | bbox_head=dict( 5 | type='PISARetinaHead', 6 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 7 | 8 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 9 | -------------------------------------------------------------------------------- /configs/pisa/pisa_ssd300_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd300_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /configs/pisa/pisa_ssd512_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd512_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_12gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_12gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[224, 448, 896, 2240], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | 
pretrained='open-mmlab://regnetx_4.0gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_4.0gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[80, 240, 560, 1360], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_6.4gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_6.4gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[168, 392, 784, 1624], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_8.0gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_8.0gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[80, 240, 720, 1920], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_1.6gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_1.6gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[72, 168, 408, 912], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_800mf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_800mf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[64, 128, 288, 672], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) 3 | -------------------------------------------------------------------------------- /configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) 3 | # training and testing settings 4 | train_cfg = dict( 5 | init=dict( 6 | assigner=dict( 7 | _delete_=True, 8 | type='MaxIoUAssigner', 9 | pos_iou_thr=0.5, 10 | neg_iou_thr=0.4, 11 | min_pos_iou=0, 12 | ignore_iof_thr=-1))) 13 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/configs/reppoints/reppoints.png -------------------------------------------------------------------------------- /configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax')) 3 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | depth=101, 6 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 7 | stage_with_dcn=(False, True, True, True))) 8 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_1x_coco.py' 2 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 3 | model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) 4 | optimizer = dict(lr=0.01) 5 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch', 14 | dcn=dict(type='DCN', deformable_groups=1, 
fallback_on_stride=False), 15 | stage_with_dcn=(False, True, True, True))) 16 | -------------------------------------------------------------------------------- /configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='partial_minmax')) 3 | -------------------------------------------------------------------------------- /configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /configs/res2net/htc_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | # learning policy 6 | lr_config = dict(step=[16, 19]) 7 | total_epochs = 20 8 | -------------------------------------------------------------------------------- /configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r101_fpn_2x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | 
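A note on the naming above: the `1x`, `2x` and `3x` suffixes correspond to 12, 24 and 36 training epochs, and `lr_config = dict(step=[...])` lists the epochs at which the learning rate is divided by 10 (the step policy's default `gamma=0.1`). The following is a small standalone sketch of the resulting schedule, illustrative only and ignoring the linear warmup that these schedules also configure:

def step_lr(base_lr, steps, epoch, gamma=0.1):
    """Learning rate used during a 0-based `epoch` under a plain step policy."""
    num_decays = sum(1 for s in steps if epoch >= s)
    return base_lr * gamma ** num_decays

# retinanet_r50_fpn_2x_coco.py: base lr 0.01, step=[16, 22], 24 epochs in total.
schedule = [step_lr(0.01, [16, 22], e) for e in range(24)]
print(schedule[0], schedule[16], schedule[22])  # 0.01, 0.001, 0.0001 (up to float rounding)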
-------------------------------------------------------------------------------- /configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/rpn/rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /configs/rpn/rpn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/rpn/rpn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /configs/rpn/rpn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py', 3 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 4 | ] 5 | img_norm_cfg = dict( 6 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 7 | train_pipeline = [ 8 | dict(type='LoadImageFromFile'), 9 | dict(type='LoadAnnotations', with_bbox=True, with_label=False), 10 | dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), 11 | dict(type='RandomFlip', flip_ratio=0.5), 12 | dict(type='Normalize', **img_norm_cfg), 13 | dict(type='Pad', size_divisor=32), 14 | dict(type='DefaultFormatBundle'), 15 | dict(type='Collect', keys=['img', 'gt_bboxes']), 16 | ] 17 | data = dict(train=dict(pipeline=train_pipeline)) 18 | evaluation = dict(interval=1, metric='proposal_fast') 19 | -------------------------------------------------------------------------------- /configs/rpn/rpn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- 
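Every variant config in this block is a thin override: it names a parent through `_base_` and replaces only the keys that differ, for example swapping the ResNet-50 backbone for ResNeXt-101. mmcv resolves the whole inheritance chain at load time, so the merged config can be inspected directly. A short sketch, assuming mmcv is installed and the repository root is the working directory:

from mmcv import Config

# rpn_x101_32x4d_fpn_1x_coco.py only overrides `pretrained` and `backbone`;
# dataset, schedule and runtime settings come from its _base_ chain.
cfg = Config.fromfile('configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py')

print(cfg.model.backbone.type)  # 'ResNeXt', taken from the override above
print(cfg.model.rpn_head.type)  # inherited from the _base_ model file
print(cfg.total_epochs)         # expected to be 12 under the standard 1x schedule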
/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 7 | model = dict( 8 | pretrained=None, 9 | backbone=dict( 10 | frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg), 11 | neck=dict(norm_cfg=norm_cfg), 12 | roi_head=dict( 13 | bbox_head=dict( 14 | type='Shared4Conv1FCBBoxHead', 15 | conv_out_channels=256, 16 | norm_cfg=norm_cfg))) 17 | # optimizer 18 | optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) 19 | optimizer_config = dict(_delete_=True, grad_clip=None) 20 | # learning policy 21 | lr_config = dict(warmup_ratio=0.1, step=[65, 71]) 22 | total_epochs = 73 23 | -------------------------------------------------------------------------------- /configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 7 | model = dict( 8 | pretrained=None, 9 | backbone=dict( 10 | frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg), 11 | neck=dict(norm_cfg=norm_cfg), 12 | roi_head=dict( 13 | bbox_head=dict( 14 | type='Shared4Conv1FCBBoxHead', 15 | conv_out_channels=256, 16 | norm_cfg=norm_cfg), 17 | mask_head=dict(norm_cfg=norm_cfg))) 18 | # optimizer 19 | optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) 20 | optimizer_config = dict(_delete_=True, grad_clip=None) 21 | # learning policy 22 | 
lr_config = dict(warmup_ratio=0.1, step=[65, 71]) 23 | total_epochs = 73 24 | -------------------------------------------------------------------------------- /configs/wider_face/ssd300_wider_face.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = dict(bbox_head=dict(num_classes=1)) 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) 8 | optimizer_config = dict() 9 | # learning policy 10 | lr_config = dict( 11 | policy='step', 12 | warmup='linear', 13 | warmup_iters=1000, 14 | warmup_ratio=0.001, 15 | step=[16, 20]) 16 | # runtime settings 17 | total_epochs = 24 18 | log_config = dict(interval=1) 19 | -------------------------------------------------------------------------------- /demo/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/coco_test_12510.jpg -------------------------------------------------------------------------------- /demo/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/corruptions_sev_3.png -------------------------------------------------------------------------------- /demo/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/data_pipeline.png -------------------------------------------------------------------------------- /demo/demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/demo.jpg -------------------------------------------------------------------------------- /demo/dota_demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/dota_demo.jpg -------------------------------------------------------------------------------- /demo/image_demo.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | from mmdet.apis import inference_detector, init_detector, show_result_pyplot 4 | 5 | 6 | def main(): 7 | parser = ArgumentParser() 8 | parser.add_argument('img', help='Image file') 9 | parser.add_argument('config', help='Config file') 10 | parser.add_argument('checkpoint', help='Checkpoint file') 11 | parser.add_argument( 12 | '--device', default='cuda:0', help='Device used for inference') 13 | parser.add_argument( 14 | '--score-thr', type=float, default=0.3, help='bbox score threshold') 15 | args = parser.parse_args() 16 | 17 | # build the model from a config file and a checkpoint file 18 | model = init_detector(args.config, args.checkpoint, device=args.device) 19 | # test a single image 20 | result = inference_detector(model, args.img) 21 | # show the results 22 | show_result_pyplot(model, args.img, result, score_thr=args.score_thr) 23 | 24 | 25 | if __name__ == '__main__': 26 | main() 27 | -------------------------------------------------------------------------------- /demo/loss_curve.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/loss_curve.png -------------------------------------------------------------------------------- /demo/obbdet_show.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/demo/obbdet_show.jpg -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTORCH="1.5" 2 | ARG CUDA="10.1" 3 | ARG CUDNN="7" 4 | 5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel 6 | 7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" 8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" 9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" 10 | 11 | RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ 12 | && apt-get clean \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | # Install mmdetection 16 | RUN conda clean --all 17 | RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection 18 | WORKDIR /mmdetection 19 | ENV FORCE_CUDA="1" 20 | RUN pip install cython --no-cache-dir 21 | RUN pip install "git+https://github.com/open-mmlab/cocoapi.git#subdirectory=pycocotools" 22 | RUN pip install --no-cache-dir -e . 23 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to MMDetection's documentation! 2 | ======================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | install.md 8 | getting_started.md 9 | config.md 10 | model_zoo.md 11 | tutorials/finetune.md 12 | tutorials/new_dataset.md 13 | tutorials/data_pipeline.md 14 | tutorials/new_modules.md 15 | compatibility.md 16 | changelog.md 17 | projects.md 18 | api.rst 19 | 20 | 21 | Indices and tables 22 | ================== 23 | 24 | * :ref:`genindex` 25 | * :ref:`search` 26 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 
11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /figs/module.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/figs/module.png -------------------------------------------------------------------------------- /figs/motivation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/figs/motivation.png -------------------------------------------------------------------------------- /figs/rotate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/figs/rotate.png -------------------------------------------------------------------------------- /mmdet/VERSION: -------------------------------------------------------------------------------- 1 | 2.2.0 2 | -------------------------------------------------------------------------------- /mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result_pyplot) 3 | from .test import multi_gpu_test, single_gpu_test 4 | from .train import get_root_logger, set_random_seed, train_detector 5 | 6 | from .obb.huge_img_inference import (get_windows, inference_detector_huge_image, 7 | merge_patch_results) 8 | 9 | __all__ = [ 10 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 11 | 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 12 | 'multi_gpu_test', 'single_gpu_test' 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet/apis/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/apis/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .evaluation import * # noqa: F401, F403 4 | from .fp16 import * # noqa: F401, F403 5 | from .mask import * # 
noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | from .hooks import * # noqa: F401, F403 9 | # from .common_module import * # noqa: F401, F403 10 | -------------------------------------------------------------------------------- /mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator, LegacyAnchorGenerator 2 | from .builder import ANCHOR_GENERATORS, build_anchor_generator 3 | from .point_generator import PointGenerator 4 | from .utils import anchor_inside_flags, calc_region, images_to_levels 5 | 6 | from .obb.theta0_anchor_generator import Theta0AnchorGenerator 7 | 8 | __all__ = [ 9 | 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 10 | 'PointGenerator', 'images_to_levels', 'calc_region', 11 | 'build_anchor_generator', 'ANCHOR_GENERATORS' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdet/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | ANCHOR_GENERATORS = Registry('Anchor generator') 4 | 5 | 6 | def build_anchor_generator(cfg, default_args=None): 7 | return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args) 8 | -------------------------------------------------------------------------------- /mmdet/core/anchor/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/core/anchor/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/anchor/obb/theta0_anchor_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from ..builder import ANCHOR_GENERATORS 3 | from ..anchor_generator import AnchorGenerator 4 | 5 | 6 | @ANCHOR_GENERATORS.register_module() 7 | class Theta0AnchorGenerator(AnchorGenerator): 8 | 9 | def single_level_grid_anchors(self, 10 | base_anchors, 11 | featmap_size, 12 | stride=(16, 16), 13 | device='cuda'): 14 | anchors = super(Theta0AnchorGenerator, self).single_level_grid_anchors( 15 | base_anchors, featmap_size, stride=stride, device=device) 16 | 17 | num_anchors = anchors.size(0) 18 | xy = (anchors[:, 2:] + anchors[:, :2]) / 2 19 | wh = anchors[:, 2:] - anchors[:, :2] 20 | theta = xy.new_zeros((num_anchors, 1)) 21 | 22 | anchors = torch.cat([xy, wh, theta], dim=1) 23 | return anchors 24 | -------------------------------------------------------------------------------- /mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 2 | from .assign_result import AssignResult 3 | from .atss_assigner import ATSSAssigner 4 | from .base_assigner import BaseAssigner 5 | from .center_region_assigner import CenterRegionAssigner 6 | from .max_iou_assigner import MaxIoUAssigner 7 | from .point_assigner import PointAssigner 8 | 9 | from .obb2hbb_max_iou_assigner import OBB2HBBMaxIoUAssigner 10 | 11 | __all__ = [ 12 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 13 | 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner' 14 | ] 15 | --------------------------------------------------------------------------------
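`Theta0AnchorGenerator` is registered into `ANCHOR_GENERATORS`, so it can be built from a config dict exactly like the stock generators; its only behavioural change is the zero angle appended to each anchor. A minimal sketch with made-up scales/ratios/strides (not taken from any config in this repository):

from mmdet.core.anchor import build_anchor_generator

# Config-style instantiation through the registry shown above.
anchor_generator = build_anchor_generator(
    dict(
        type='Theta0AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[8, 16]))

# One feature-map size per stride; each anchor now has 5 values
# (cx, cy, w, h, theta) instead of the usual 4 corner coordinates.
anchors = anchor_generator.grid_anchors([(64, 64), (32, 32)], device='cpu')
print(anchors[0].shape)  # expected: torch.Size([12288, 5])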
/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | """Base assigner that assigns boxes to ground truth boxes""" 6 | 7 | @abstractmethod 8 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 9 | """Assign each box to a ground truth box or mark it as a negative sample""" 10 | pass 11 | -------------------------------------------------------------------------------- /mmdet/core/bbox/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | BBOX_ASSIGNERS = Registry('bbox_assigner') 4 | BBOX_SAMPLERS = Registry('bbox_sampler') 5 | BBOX_CODERS = Registry('bbox_coder') 6 | 7 | 8 | def build_assigner(cfg, **default_args): 9 | """Builder of box assigner""" 10 | return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) 11 | 12 | 13 | def build_sampler(cfg, **default_args): 14 | """Builder of box sampler""" 15 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) 16 | 17 | 18 | def build_bbox_coder(cfg, **default_args): 19 | """Builder of box coder""" 20 | return build_from_cfg(cfg, BBOX_CODERS, default_args) 21 | -------------------------------------------------------------------------------- /mmdet/core/bbox/coder/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_bbox_coder import BaseBBoxCoder 2 | from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder 3 | from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder 4 | from .pseudo_bbox_coder import PseudoBBoxCoder 5 | from .tblr_bbox_coder import TBLRBBoxCoder 6 | 7 | from .obb.obb2obb_delta_xywht_coder import OBB2OBBDeltaXYWHTCoder 8 | from .obb.hbb2obb_delta_xywht_coder import HBB2OBBDeltaXYWHTCoder 9 | from .obb.gliding_vertex_coders import GVFixCoder, GVRatioCoder 10 | from .obb.midpoint_offset_coder import MidpointOffsetCoder 11 | 12 | __all__ = [ 13 | 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 14 | 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 15 | 'OBB2OBBDeltaXYWHTCoder', 'HBB2OBBDeltaXYWHTCoder' 16 | ] 17 | -------------------------------------------------------------------------------- /mmdet/core/bbox/coder/base_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseBBoxCoder(metaclass=ABCMeta): 5 | """Base bounding box coder""" 6 | 7 | def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def encode(self, bboxes, gt_bboxes): 12 | """Encode deltas between bboxes and ground truth boxes""" 13 | pass 14 | 15 | @abstractmethod 16 | def decode(self, bboxes, bboxes_pred): 17 | """ 18 | Decode the predicted bboxes according to prediction and base boxes 19 | """ 20 | pass 21 | -------------------------------------------------------------------------------- /mmdet/core/bbox/coder/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/core/bbox/coder/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/bbox/coder/pseudo_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from ..builder import BBOX_CODERS 2 | from
.base_bbox_coder import BaseBBoxCoder 3 | 4 | 5 | @BBOX_CODERS.register_module() 6 | class PseudoBBoxCoder(BaseBBoxCoder): 7 | """Pseudo bounding box coder""" 8 | 9 | def __init__(self, **kwargs): 10 | super(BaseBBoxCoder, self).__init__(**kwargs) 11 | 12 | def encode(self, bboxes, gt_bboxes): 13 | """torch.Tensor: return the given ``bboxes``""" 14 | return gt_bboxes 15 | 16 | def decode(self, bboxes, pred_bboxes): 17 | """torch.Tensor: return the given ``pred_bboxes``""" 18 | return pred_bboxes 19 | -------------------------------------------------------------------------------- /mmdet/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_iou_calculator 2 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps 3 | 4 | from .obb.obbiou_calculator import OBBOverlaps, PolyOverlaps 5 | 6 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps', 7 | 'OBBOverlaps', 'PolyOverlaps' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdet/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | IOU_CALCULATORS = Registry('IoU calculator') 4 | 5 | 6 | def build_iou_calculator(cfg, default_args=None): 7 | """Builder of IoU calculator""" 8 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 9 | -------------------------------------------------------------------------------- /mmdet/core/bbox/iou_calculators/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/core/bbox/iou_calculators/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .combined_sampler import CombinedSampler 3 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 4 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 5 | from .ohem_sampler import OHEMSampler 6 | from .pseudo_sampler import PseudoSampler 7 | from .random_sampler import RandomSampler 8 | from .sampling_result import SamplingResult 9 | from .score_hlr_sampler import ScoreHLRSampler 10 | 11 | from .obb import (OBBSamplingResult, OBBBaseSampler, OBBRandomSampler, 12 | OBBOHEMSampler) 13 | 14 | __all__ = [ 15 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 16 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 17 | 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 18 | 19 | 'OBBSamplingResult', 'OBBBaseSampler', 'OBBRandomSampler', 20 | 'OBBOHEMSampler' 21 | ] 22 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from ..builder import BBOX_SAMPLERS, build_sampler 2 | from .base_sampler import BaseSampler 3 | 4 | 5 | @BBOX_SAMPLERS.register_module() 6 | class CombinedSampler(BaseSampler): 7 | """A sampler that combines positive sampler and negative sampler""" 8 | 9 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 10 | super(CombinedSampler, self).__init__(**kwargs) 11 | self.pos_sampler = build_sampler(pos_sampler, 
**kwargs) 12 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 13 | 14 | def _sample_pos(self, **kwargs): 15 | """Sample positive samples""" 16 | raise NotImplementedError 17 | 18 | def _sample_neg(self, **kwargs): 19 | """Sample negative samples""" 20 | raise NotImplementedError 21 | -------------------------------------------------------------------------------- /mmdet/core/bbox/samplers/obb/__init__.py: -------------------------------------------------------------------------------- 1 | from .obb_sampling_result import OBBSamplingResult 2 | from .obb_base_sampler import OBBBaseSampler 3 | from .obb_random_sampler import OBBRandomSampler 4 | from .obb_ohem_sampler import OBBOHEMSampler 5 | -------------------------------------------------------------------------------- /mmdet/core/bbox/transforms_obb/__init__.py: -------------------------------------------------------------------------------- 1 | from .form import (poly2obb, rectpoly2obb, poly2hbb, obb2poly, obb2hbb, 2 | hbb2poly, hbb2obb, bbox2type) 3 | from .mapping import (hbb_flip, obb_flip, poly_flip, hbb_warp, obb_warp, 4 | poly_warp, hbb_mapping, obb_mapping, poly_mapping, 5 | hbb_mapping_back, obb_mapping_back, poly_mapping_back, 6 | arb_mapping, arb_mapping_back) 7 | from .misc import (get_bbox_type, get_bbox_dim, get_bbox_areas, choice_by_type, 8 | arb2result, arb2roi, distance2obb, regular_theta, regular_obb, 9 | mintheta_obb) 10 | -------------------------------------------------------------------------------- /mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, 2 | get_classes, imagenet_det_classes, 3 | imagenet_vid_classes, voc_classes) 4 | from .eval_hooks import DistEvalHook, EvalHook 5 | from .mean_ap import average_precision, eval_map, print_map_summary 6 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 7 | print_recall_summary) 8 | from .obb.obb_mean_ap import eval_arb_map 9 | from .obb.obb_recall import eval_arb_recalls 10 | 11 | __all__ = [ 12 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 13 | 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 14 | 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 15 | 'print_map_summary', 'eval_recalls', 'print_recall_summary', 16 | 'plot_num_recall', 'plot_iou_recall', 17 | 'eval_arb_map', 'eval_arb_recalls' 18 | ] 19 | -------------------------------------------------------------------------------- /mmdet/core/evaluation/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/core/evaluation/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/fp16/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import auto_fp16, force_fp32 2 | from .hooks import Fp16OptimizerHook, wrap_fp16_model 3 | 4 | __all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] 5 | -------------------------------------------------------------------------------- /mmdet/core/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .random_fp import RandomFPHook 2 | -------------------------------------------------------------------------------- 
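The bare `fp16/__init__.py` above only re-exports four names; in practice `auto_fp16` and `force_fp32` are method decorators applied inside heads and detectors. A small sketch of the intended pattern, using a toy module that is not part of this repository (the decorators stay inert until `wrap_fp16_model` flips `fp16_enabled`):

import torch
import torch.nn as nn

from mmdet.core import auto_fp16, force_fp32


class ToyHead(nn.Module):
    """Not a real mmdet head; it only shows where the decorators go."""

    def __init__(self):
        super().__init__()
        self.fp16_enabled = False  # set to True by wrap_fp16_model
        self.conv = nn.Conv2d(3, 8, 3, padding=1)

    @auto_fp16(apply_to=('x', ))
    def forward(self, x):
        # the tagged inputs are cast to half precision once fp16 is enabled
        return self.conv(x)

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, target):
        # losses are computed in fp32 for numerical stability
        return torch.nn.functional.mse_loss(pred, target)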
/mmdet/core/hooks/random_fp.py: -------------------------------------------------------------------------------- 1 | from mmcv.runner.hooks import Hook 2 | 3 | 4 | class RandomFPHook(Hook): 5 | ''' 6 | Shuffle the false patches in the dataset after every training epoch 7 | ''' 8 | 9 | def after_train_epoch(self, runner): 10 | dataset = runner.data_loader.dataset 11 | if not hasattr(dataset, 'add_random_fp'): 12 | return 13 | 14 | data_infos = dataset.add_random_fp() 15 | ori_infos = runner.data_loader.dataset.data_infos 16 | assert len(data_infos) == len(ori_infos) 17 | 18 | runner.data_loader.dataset.data_infos = data_infos 19 | -------------------------------------------------------------------------------- /mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_target import mask_target 2 | from .structures import BitmapMasks, PolygonMasks 3 | from .utils import encode_mask_results, split_combined_polys 4 | from .obb.obb_mask_target import obb_mask_target 5 | 6 | __all__ = [ 7 | 'split_combined_polys', 'mask_target', 'BitmapMasks', 'PolygonMasks', 8 | 'encode_mask_results', 'obb_mask_target' 9 | ] 10 | -------------------------------------------------------------------------------- /mmdet/core/mask/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/core/mask/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import multiclass_nms 2 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 3 | merge_aug_proposals, merge_aug_scores) 4 | 5 | from .obb import (multiclass_arb_nms, merge_rotate_aug_proposals, merge_rotate_aug_hbb, 6 | merge_rotate_aug_obb, merge_rotate_aug_arb) 7 | 8 | __all__ = [ 9 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 10 | 'merge_aug_scores', 'merge_aug_masks', 11 | 'multiclass_arb_nms', 'merge_rotate_aug_proposals', 'merge_rotate_aug_hbb', 12 | 'merge_rotate_aug_obb', 'merge_rotate_aug_arb' 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet/core/post_processing/obb/__init__.py: -------------------------------------------------------------------------------- 1 | from .obb_nms import multiclass_arb_nms 2 | from .obb_merge_augs import (merge_rotate_aug_proposals, merge_rotate_aug_hbb, 3 | merge_rotate_aug_obb, merge_rotate_aug_poly, 4 | merge_rotate_aug_arb) 5 | -------------------------------------------------------------------------------- /mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads 2 | from .misc import multi_apply, tensor2imgs, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'multi_apply', 6 | 'unmap' 7 | ] 8 | -------------------------------------------------------------------------------- /mmdet/datasets/deepfashion.py: -------------------------------------------------------------------------------- 1 | from .builder import DATASETS 2 | from .coco import CocoDataset 3 | 4 | 5 | @DATASETS.register_module() 6 | class DeepFashionDataset(CocoDataset): 7 | 8 | CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 9 | 'neckwear', 'headwear', 'eyeglass',
'belt', 'footwear', 'hair', 10 | 'skin', 'face') 11 | -------------------------------------------------------------------------------- /mmdet/datasets/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/datasets/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/datasets/pipelines/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/datasets/pipelines/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .distributed_sampler import DistributedSampler 2 | from .group_sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler'] 5 | -------------------------------------------------------------------------------- /mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 3 | ROI_EXTRACTORS, SHARED_HEADS, build_backbone, 4 | build_detector, build_head, build_loss, build_neck, 5 | build_roi_extractor, build_shared_head) 6 | from .dense_heads import * # noqa: F401,F403 7 | from .detectors import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .roi_heads import * # noqa: F401,F403 11 | 12 | __all__ = [ 13 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 14 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 15 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 16 | ] 17 | -------------------------------------------------------------------------------- /mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .detectors_resnet import DetectoRS_ResNet 2 | from .detectors_resnext import DetectoRS_ResNeXt 3 | from .hourglass import HourglassNet 4 | from .hrnet import HRNet 5 | from .regnet import RegNet 6 | from .res2net import Res2Net 7 | from .resnet import ResNet, ResNetV1d 8 | from .resnext import ResNeXt 9 | from .ssd_vgg import SSDVGG 10 | from .arc_resnet import ARCResNet 11 | 12 | __all__ = [ 13 | 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', 14 | 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'ARCResNet', 15 | ] 16 | -------------------------------------------------------------------------------- /mmdet/models/backbones/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .adaptive_rotated_conv import AdaptiveRotatedConv2d 2 | from .routing_function import RountingFunction 3 | 4 | __all__ = [ 5 | 'AdaptiveRotatedConv2d', 'RountingFunction', 6 | ] 7 | -------------------------------------------------------------------------------- /mmdet/models/dense_heads/obb/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/dense_heads/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/detectors/atss.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class ATSS(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FasterRCNN(TwoStageDetector): 7 | """Implementation of `Faster R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(FasterRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fcos.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FCOS(SingleStageDetector): 7 | """Implementation of `FCOS `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fovea.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FOVEA(SingleStageDetector): 7 | """Implementation of `FoveaBox `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/fsaf.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FSAF(SingleStageDetector): 7 | """Implementation of `FSAF `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/gfl.py: 
-------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GFL(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdet/models/detectors/grid_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GridRCNN(TwoStageDetector): 7 | """Grid R-CNN. 8 | 9 | This detector is the implementation of: 10 | - Grid R-CNN (https://arxiv.org/abs/1811.12030) 11 | - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) 12 | """ 13 | 14 | def __init__(self, 15 | backbone, 16 | rpn_head, 17 | roi_head, 18 | train_cfg, 19 | test_cfg, 20 | neck=None, 21 | pretrained=None): 22 | super(GridRCNN, self).__init__( 23 | backbone=backbone, 24 | neck=neck, 25 | rpn_head=rpn_head, 26 | roi_head=roi_head, 27 | train_cfg=train_cfg, 28 | test_cfg=test_cfg, 29 | pretrained=pretrained) 30 | -------------------------------------------------------------------------------- /mmdet/models/detectors/htc.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .cascade_rcnn import CascadeRCNN 3 | 4 | 5 | @DETECTORS.register_module() 6 | class HybridTaskCascade(CascadeRCNN): 7 | """Implementation of `HTC `_""" 8 | 9 | def __init__(self, **kwargs): 10 | super(HybridTaskCascade, self).__init__(**kwargs) 11 | 12 | @property 13 | def with_semantic(self): 14 | """bool: whether the detector has a semantic head""" 15 | return self.roi_head.with_semantic 16 | -------------------------------------------------------------------------------- /mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskRCNN(TwoStageDetector): 7 | """Implementation of `Mask R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(MaskRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /mmdet/models/detectors/mask_scoring_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskScoringRCNN(TwoStageDetector): 7 | """Mask Scoring RCNN. 
8 | 9 | https://arxiv.org/abs/1903.00241 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | rpn_head, 15 | roi_head, 16 | train_cfg, 17 | test_cfg, 18 | neck=None, 19 | pretrained=None): 20 | super(MaskScoringRCNN, self).__init__( 21 | backbone=backbone, 22 | neck=neck, 23 | rpn_head=rpn_head, 24 | roi_head=roi_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | pretrained=pretrained) 28 | -------------------------------------------------------------------------------- /mmdet/models/detectors/nasfcos.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class NASFCOS(SingleStageDetector): 7 | """NAS-FCOS: Fast Neural Architecture Search for Object Detection. 8 | 9 | https://arxiv.org/abs/1906.0442 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | neck, 15 | bbox_head, 16 | train_cfg=None, 17 | test_cfg=None, 18 | pretrained=None): 19 | super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 20 | test_cfg, pretrained) 21 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/detectors/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/faster_rcnn_obb.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_two_stage import OBBTwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FasterRCNNOBB(OBBTwoStageDetector): 7 | """Implementation of `Faster R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(FasterRCNNOBB, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/fcos_obb.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_single_stage import OBBSingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FCOSOBB(OBBSingleStageDetector): 7 | """Implementation of `FCOS `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FCOSOBB, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/gliding_vertex.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_two_stage import OBBTwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GlidingVertex(OBBTwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | rpn_head, 11 | roi_head, 12 | train_cfg, 13 | test_cfg, 14 | neck=None, 15 | pretrained=None): 16 | super(GlidingVertex, self).__init__( 17 | backbone=backbone, 18 | neck=neck, 19 | 
rpn_head=rpn_head, 20 | roi_head=roi_head, 21 | train_cfg=train_cfg, 22 | test_cfg=test_cfg, 23 | pretrained=pretrained) 24 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/oriented_rcnn.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_two_stage import OBBTwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class OrientedRCNN(OBBTwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck=None, 11 | rpn_head=None, 12 | roi_head=None, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(OrientedRCNN, self).__init__( 17 | backbone=backbone, 18 | neck=neck, 19 | rpn_head=rpn_head, 20 | roi_head=roi_head, 21 | train_cfg=train_cfg, 22 | test_cfg=test_cfg, 23 | pretrained=pretrained) 24 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/retinanet_obb.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_single_stage import OBBSingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RetinaNetOBB(OBBSingleStageDetector): 7 | """Implementation of `RetinaNet `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(RetinaNetOBB, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/roi_transformer.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_two_stage import OBBTwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RoITransformer(OBBTwoStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck=None, 11 | rpn_head=None, 12 | roi_head=None, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(RoITransformer, self).__init__( 17 | backbone=backbone, 18 | neck=neck, 19 | rpn_head=rpn_head, 20 | roi_head=roi_head, 21 | train_cfg=train_cfg, 22 | test_cfg=test_cfg, 23 | pretrained=pretrained) 24 | -------------------------------------------------------------------------------- /mmdet/models/detectors/obb/s2anet.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from .obb_single_stage import OBBSingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class S2ANet(OBBSingleStageDetector): 7 | """Implementation of `RetinaNet `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(S2ANet, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/detectors/point_rend.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class PointRend(TwoStageDetector): 7 | """PointRend: Image Segmentation as Rendering 8 | 9 | This detector is the implementation of 10 | `PointRend `_. 
11 | 12 | """ 13 | 14 | def __init__(self, 15 | backbone, 16 | rpn_head, 17 | roi_head, 18 | train_cfg, 19 | test_cfg, 20 | neck=None, 21 | pretrained=None): 22 | super(PointRend, self).__init__( 23 | backbone=backbone, 24 | neck=neck, 25 | rpn_head=rpn_head, 26 | roi_head=roi_head, 27 | train_cfg=train_cfg, 28 | test_cfg=test_cfg, 29 | pretrained=pretrained) 30 | -------------------------------------------------------------------------------- /mmdet/models/detectors/retinanet.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RetinaNet(SingleStageDetector): 7 | """Implementation of `RetinaNet `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdet/models/losses/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/losses/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .bfp import BFP 2 | from .fpn import FPN 3 | from .fpn_carafe import FPN_CARAFE 4 | from .hrfpn import HRFPN 5 | from .nas_fpn import NASFPN 6 | from .nasfcos_fpn import NASFCOS_FPN 7 | from .pafpn import PAFPN 8 | from .rfp import RFP 9 | 10 | __all__ = [ 11 | 'FPN', 'BFP', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', 'NASFCOS_FPN', 12 | 'RFP' 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, 3 | Shared4Conv1FCBBoxHead) 4 | from .double_bbox_head import DoubleConvFCBBoxHead 5 | 6 | from .obb.obbox_head import OBBoxHead 7 | from .obb.obb_convfc_bbox_head import (OBBConvFCBBoxHead, OBBShared2FCBBoxHead, 8 | OBBShared4Conv1FCBBoxHead) 9 | from .obb.obb_double_bbox_head import OBBDoubleConvFCBBoxHead 10 | from .obb.gv_bbox_head import GVBBoxHead 11 | 12 | __all__ = [ 13 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 14 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 15 | 16 | 'OBBoxHead', 'OBBConvFCBBoxHead', 'OBBShared2FCBBoxHead', 17 | 'OBBShared4Conv1FCBBoxHead' 18 | ] 19 | -------------------------------------------------------------------------------- /mmdet/models/roi_heads/bbox_heads/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/roi_heads/bbox_heads/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/roi_heads/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .coarse_mask_head import CoarseMaskHead 2 | from .fcn_mask_head import FCNMaskHead 3 | from .fused_semantic_head import FusedSemanticHead 4 | from .grid_head 
import GridHead 5 | from .htc_mask_head import HTCMaskHead 6 | from .mask_point_head import MaskPointHead 7 | from .maskiou_head import MaskIoUHead 8 | from .obb.obb_fcn_mask_head import OBBFCNMaskHead 9 | 10 | __all__ = [ 11 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 12 | 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'OBBFCNMaskHead' 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet/models/roi_heads/mask_heads/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/roi_heads/mask_heads/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/roi_heads/obb/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /mmdet/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .generic_roi_extractor import GenericRoIExtractor 2 | from .single_level_roi_extractor import SingleRoIExtractor 3 | 4 | from .obb.obb_single_level_roi_extractor import OBBSingleRoIExtractor 5 | from .obb.hbb_select_level_roi_extractor import HBBSelectLVLRoIExtractor 6 | 7 | __all__ = [ 8 | 'SingleRoIExtractor', 9 | 'GenericRoIExtractor', 10 | 11 | 'OBBSingleRoIExtractor', 12 | 'HBBSelectLVLRoIExtractor', 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet/models/roi_heads/roi_extractors/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LeapLabTHU/ARC/382fcdb616d544c63e73d22c3cf3a429f06bd1eb/mmdet/models/roi_heads/roi_extractors/obb/__init__.py -------------------------------------------------------------------------------- /mmdet/models/roi_heads/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/box_iou_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .box_iou_rotated_wrapper import obb_overlaps 2 | -------------------------------------------------------------------------------- /mmdet/ops/convex/__init__.py: -------------------------------------------------------------------------------- 1 | from .convex_wrapper import convex_sort 2 | 3 | __all__ = ['convex_sort'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/convex/convex_wrapper.py: -------------------------------------------------------------------------------- 1 | from torch.autograd import Function 2 | from . 
import convex_ext 3 | 4 | 5 | class ConvexSortFunction(Function): 6 | 7 | @staticmethod 8 | def forward(ctx, pts, masks, circular): 9 | idx = convex_ext.convex_sort(pts, masks, circular) 10 | ctx.mark_non_differentiable(idx) 11 | return idx 12 | 13 | @staticmethod 14 | def backward(ctx, grad_output): 15 | return None, None, None  # the sorted indices are non-differentiable, so pts, masks and circular get no gradient 16 | 17 | convex_sort_func = ConvexSortFunction.apply 18 | 19 | 20 | def convex_sort(pts, masks, circular=True): 21 | return convex_sort_func(pts, masks, circular) 22 | 23 | 24 | -------------------------------------------------------------------------------- /mmdet/ops/convex/src/convex_ext.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <ATen/ATen.h> 3 | 4 | #ifdef WITH_CUDA 5 | at::Tensor convex_sort_cuda( 6 | const at::Tensor& pts, const at::Tensor& masks, const bool circular); 7 | #endif 8 | 9 | at::Tensor convex_sort_cpu( 10 | const at::Tensor& pts, const at::Tensor& masks, const bool circular); 11 | 12 | 13 | at::Tensor convex_sort( 14 | const at::Tensor& pts, const at::Tensor& masks, const bool circular) { 15 | if (pts.device().is_cuda()) { 16 | #ifdef WITH_CUDA 17 | return convex_sort_cuda(pts, masks, circular); 18 | #else 19 | AT_ERROR("convex_sort is not compiled with GPU support"); 20 | #endif 21 | } 22 | return convex_sort_cpu(pts, masks, circular); 23 | } 24 | 25 | 26 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 27 | m.def("convex_sort", &convex_sort, "select the convex points and sort them"); 28 | } 29 | -------------------------------------------------------------------------------- /mmdet/ops/corner_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .corner_pool import CornerPool 2 | 3 | __all__ = ['CornerPool'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .masked_conv import MaskedConv2d, masked_conv2d 2 | 3 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import batched_nms, nms, nms_match, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms', 'batched_nms', 'nms_match'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/nms/src/cuda/nms_cuda.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
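// Host-side dispatcher for CUDA NMS: it checks that `dets` lives on the GPU, returns an empty index tensor for the zero-box case, and otherwise forwards to the nms_cuda_forward kernel launcher declared below.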
2 | #include <torch/extension.h> 3 | 4 | #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") 5 | 6 | at::Tensor nms_cuda_forward(const at::Tensor boxes, float nms_overlap_thresh); 7 | 8 | at::Tensor nms_cuda(const at::Tensor& dets, const float threshold) { 9 | CHECK_CUDA(dets); 10 | if (dets.numel() == 0) 11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 12 | return nms_cuda_forward(dets, threshold); 13 | } 14 | -------------------------------------------------------------------------------- /mmdet/ops/nms_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_rotated_wrapper import obb_nms, poly_nms, BT_nms, arb_batched_nms 2 | 3 | __all__ = ['obb_nms', 'poly_nms', 'BT_nms', 'arb_batched_nms'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/nms_rotated/src/poly_nms_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | 3 | template <typename scalar_t> 4 | at::Tensor poly_nms_cpu_kernel(const at::Tensor& dets, const float threshold) { 5 | 6 | -------------------------------------------------------------------------------- /mmdet/ops/orn/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules.ORConv import ORConv2d 2 | from .functions import rotation_invariant_encoding, RotationInvariantPooling 3 | -------------------------------------------------------------------------------- /mmdet/ops/orn/functions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | import torch 3 | from .active_rotating_filter import active_rotating_filter 4 | from .active_rotating_filter import ActiveRotatingFilter 5 | from .rotation_invariant_encoding import rotation_invariant_encoding 6 | from .rotation_invariant_encoding import RotationInvariantEncoding 7 | from .rotation_invariant_pooling import RotationInvariantPooling 8 | 9 | __all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding', 'RotationInvariantEncoding', 'RotationInvariantPooling'] -------------------------------------------------------------------------------- /mmdet/ops/orn/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .ORConv import ORConv2d 2 | #from .ORConv_v2 import ORConv2d_v2 3 | 4 | #__all__ = ['ORConv2d', 'ORConv2d_v2'] 5 | __all__ = ['ORConv2d'] 6 | -------------------------------------------------------------------------------- /mmdet/ops/orn/src/ActiveRotatingFilter.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
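// Python-facing dispatchers for the Active Rotating Filter (ARF) op: each call is routed to the CUDA or CPU implementation declared in cpu/vision.h and cuda/vision.h, depending on where the input tensors live and on whether the extension was built with WITH_CUDA.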
2 | #pragma once 3 | 4 | #include "./cpu/vision.h" 5 | 6 | #ifdef WITH_CUDA 7 | #include "./cuda/vision.h" 8 | #endif 9 | 10 | // Interface for Python 11 | at::Tensor ARF_forward(const at::Tensor& weight, 12 | const at::Tensor& indices) { 13 | if (weight.type().is_cuda()) { 14 | #ifdef WITH_CUDA 15 | return ARF_forward_cuda(weight, indices); 16 | #else 17 | AT_ERROR("Not compiled with GPU support"); 18 | #endif 19 | } 20 | return ARF_forward_cpu(weight, indices); 21 | } 22 | 23 | at::Tensor ARF_backward(const at::Tensor& indices, 24 | const at::Tensor& gradOutput) { 25 | if (gradOutput.type().is_cuda()) { 26 | #ifdef WITH_CUDA 27 | return ARF_backward_cuda(indices, gradOutput); 28 | #else 29 | AT_ERROR("Not compiled with GPU support"); 30 | #endif 31 | } 32 | return ARF_backward_cpu(indices, gradOutput); 33 | // AT_ERROR("Not implemented on the CPU");  // unreachable: the CPU branch above already returns 34 | } -------------------------------------------------------------------------------- /mmdet/ops/orn/src/cpu/vision.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | #pragma once 3 | // #include 4 | #include <torch/extension.h> 5 | 6 | typedef unsigned long uint64; 7 | typedef unsigned int uint32; 8 | typedef unsigned short uint16; 9 | typedef unsigned char uint8; 10 | 11 | 12 | std::tuple<at::Tensor, at::Tensor> RIE_forward_cpu(const at::Tensor& feature, 13 | const uint8 nOrientation); 14 | 15 | at::Tensor RIE_backward_cpu(const at::Tensor& mainDirection, 16 | const at::Tensor& gradOutput, 17 | const uint8 nOrientation); 18 | 19 | at::Tensor ARF_forward_cpu(const at::Tensor& weight, 20 | const at::Tensor& indices); 21 | 22 | at::Tensor ARF_backward_cpu(const at::Tensor& indices, 23 | const at::Tensor& gradOutput); -------------------------------------------------------------------------------- /mmdet/ops/orn/src/cuda/vision.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 | #pragma once 3 | // #include 4 | #include <torch/extension.h> 5 | 6 | typedef unsigned long uint64; 7 | typedef unsigned int uint32; 8 | typedef unsigned short uint16; 9 | typedef unsigned char uint8; 10 | 11 | 12 | std::tuple<at::Tensor, at::Tensor> RIE_forward_cuda(const at::Tensor& feature, 13 | const uint8 nOrientation); 14 | 15 | at::Tensor RIE_backward_cuda(const at::Tensor& mainDirection, 16 | const at::Tensor& gradOutput, 17 | const uint8 nOrientation); 18 | 19 | at::Tensor ARF_forward_cuda(const at::Tensor& weight, 20 | const at::Tensor& indices); 21 | 22 | at::Tensor ARF_backward_cuda(const at::Tensor& indices, 23 | const at::Tensor& gradOutput); -------------------------------------------------------------------------------- /mmdet/ops/orn/src/vision.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
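// pybind11 entry point of the ORN extension: binds the ARF and RIE forward/backward dispatchers declared in the headers included below so the Python wrappers in mmdet/ops/orn/functions/ can call them.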
2 | #include <torch/extension.h> 3 | #include "./ActiveRotatingFilter.h" 4 | #include "./RotationInvariantEncoding.h" 5 | 6 | 7 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 8 | m.def("arf_forward", &ARF_forward, "ARF_forward"); 9 | m.def("arf_backward", &ARF_backward, "ARF_backward"); 10 | m.def("rie_forward", &RIE_forward, "RIE_forward"); 11 | m.def("rie_backward", &RIE_backward, "RIE_backward"); 12 | } -------------------------------------------------------------------------------- /mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align import RoIAlign, roi_align 2 | 3 | __all__ = ['roi_align', 'RoIAlign'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/roi_align/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import numpy as np 5 | import torch 6 | from torch.autograd import gradcheck 7 | 8 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 9 | from roi_align import RoIAlign # noqa: E402, isort:skip 10 | 11 | feat_size = 15 12 | spatial_scale = 1.0 / 8 13 | img_size = feat_size / spatial_scale 14 | num_imgs = 2 15 | num_rois = 20 16 | 17 | batch_ind = np.random.randint(num_imgs, size=(num_rois, 1)) 18 | rois = np.random.rand(num_rois, 4) * img_size * 0.5 19 | rois[:, 2:] += img_size * 0.5 20 | rois = np.hstack((batch_ind, rois)) 21 | 22 | feat = torch.randn( 23 | num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0') 24 | rois = torch.from_numpy(rois).float().cuda() 25 | inputs = (feat, rois) 26 | print('Gradcheck for roi align...') 27 | test = gradcheck(RoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3) 28 | print(test) 29 | test = gradcheck(RoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3) 30 | print(test) 31 | -------------------------------------------------------------------------------- /mmdet/ops/roi_align_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align_rotated import RoIAlignRotated, roi_align_rotated 2 | 3 | __all__ = ['roi_align_rotated', 'RoIAlignRotated'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_pool import RoIPool, roi_pool 2 | 3 | __all__ = ['roi_pool', 'RoIPool'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/roi_pool/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import torch 5 | from torch.autograd import gradcheck 6 | 7 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 8 | from roi_pool import RoIPool # noqa: E402, isort:skip 9 | 10 | feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda() 11 | rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55], 12 | [1, 67, 40, 110, 120]]).cuda() 13 | inputs = (feat, rois) 14 | print('Gradcheck for roi pooling...') 15 | test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3) 16 | print(test) 17 | -------------------------------------------------------------------------------- /mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 2 | 
3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /mmdet/ops/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from . import compiling_info 2 | from .compiling_info import get_compiler_version, get_compiling_cuda_version 3 | 4 | # get_compiler_version = compiling_info.get_compiler_version 5 | # get_compiling_cuda_version = compiling_info.get_compiling_cuda_version 6 | 7 | __all__ = ['get_compiler_version', 'get_compiling_cuda_version'] 8 | -------------------------------------------------------------------------------- /mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger 3 | 4 | __all__ = ['get_root_logger', 'collect_env'] 5 | -------------------------------------------------------------------------------- /mmdet/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from mmcv.utils import get_logger 4 | 5 | 6 | def get_root_logger(log_file=None, log_level=logging.INFO): 7 | """Get root logger 8 | 9 | Args: 10 | log_file (str, optional): File path of log. Defaults to None. 11 | log_level (int, optional): The level of logger. 12 | Defaults to logging.INFO. 13 | 14 | Returns: 15 | :obj:`logging.Logger`: The obtained logger 16 | """ 17 | logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) 18 | 19 | return logger 20 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/build.txt 2 | -r requirements/optional.txt 3 | -r requirements/runtime.txt 4 | -r requirements/tests.txt 5 | -------------------------------------------------------------------------------- /requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | numpy 3 | torch>=1.3 4 | -------------------------------------------------------------------------------- /requirements/docs.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | cityscapesscripts 3 | imagecorruptions 4 | -------------------------------------------------------------------------------- /requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | torch 3 | torchvision 4 | -------------------------------------------------------------------------------- /requirements/runtime.txt: 
-------------------------------------------------------------------------------- 1 | matplotlib 2 | mmcv-full>=1.3 3 | numpy 4 | # need older pillow until torchvision is fixed 5 | Pillow<=6.2.2 6 | six 7 | terminaltables 8 | torch>=1.3 9 | torchvision 10 | -------------------------------------------------------------------------------- /requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | interrogate 5 | isort 6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 7 | kwarray 8 | pytest 9 | pytest-cov 10 | pytest-runner 11 | ubelt 12 | xdoctest >= 0.10.0 13 | yapf 14 | -------------------------------------------------------------------------------- /tests/test_pipelines/test_formatting.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from mmcv.utils import build_from_cfg 4 | 5 | from mmdet.datasets.builder import PIPELINES 6 | 7 | 8 | def test_default_format_bundle(): 9 | results = dict( 10 | img_prefix=osp.join(osp.dirname(__file__), '../data'), 11 | img_info=dict(filename='color.jpg')) 12 | load = dict(type='LoadImageFromFile') 13 | load = build_from_cfg(load, PIPELINES) 14 | bundle = dict(type='DefaultFormatBundle') 15 | bundle = build_from_cfg(bundle, PIPELINES) 16 | results = load(results) 17 | assert 'pad_shape' not in results 18 | assert 'scale_factor' not in results 19 | assert 'img_norm_cfg' not in results 20 | results = bundle(results) 21 | assert 'pad_shape' in results 22 | assert 'scale_factor' in results 23 | assert 'img_norm_cfg' in results 24 | -------------------------------------------------------------------------------- /tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | PORT=${PORT:-29500} 7 | 8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 9 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 11 | -------------------------------------------------------------------------------- /tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29500} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /tools/print_config.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from mmcv import Config, DictAction 4 | 5 | 6 | def parse_args(): 7 | parser = argparse.ArgumentParser(description='Print the whole config') 8 | parser.add_argument('config', help='config file path') 9 | parser.add_argument( 10 | '--options', nargs='+', action=DictAction, help='arguments in dict') 11 | args = parser.parse_args() 12 | 13 | return args 14 | 15 | 16 | def main(): 17 | args = parse_args() 18 | 19 | cfg = Config.fromfile(args.config) 20 | if args.options is not None: 21 | cfg.merge_from_dict(args.options) 22 | print(f'Config:\n{cfg.pretty_text}') 23 | 24 | 25 | if __name__ == '__main__': 26 | main() 27 | 
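# Example invocation (the config path and the override key below are illustrative only,
# not files or fields guaranteed to exist in this repository):
#   python tools/print_config.py configs/my_experiment.py --options data.samples_per_gpu=2
# --options takes space-separated key=value pairs that mmcv's DictAction merges into the
# loaded config before it is printed.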
-------------------------------------------------------------------------------- /tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${@:5} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 25 | --------------------------------------------------------------------------------
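A minimal usage sketch (not a file from this repository): assuming the package and its compiled ops are installed as `mmdet`, the two helpers exported by mmdet/utils/__init__.py above can be combined to log the build environment before launching the tools/ scripts. It is assumed here that collect_env() returns a name-to-value mapping, as it does in upstream MMDetection.

import logging

from mmdet.utils import collect_env, get_root_logger

# Configure the shared 'mmdet' logger, then dump the environment report line by line.
logger = get_root_logger(log_level=logging.INFO)
for name, value in collect_env().items():
    logger.info(f'{name}: {value}')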