├── .github └── FUNDING.yml ├── LICENSE ├── NeurIPS 2020 Presentation - Full (1hr).pptx ├── NeurIPS 2020 Presentation - Short (5min).pptx ├── README.md ├── classification ├── .gitignore ├── README.md ├── config │ ├── CIFAR100_LT │ │ ├── causal_norm.yaml │ │ ├── causal_norm_32.yaml │ │ ├── cls_lws.yaml │ │ └── feat_unifrom.yaml │ ├── CIFAR10_LT │ │ ├── causal_norm.yaml │ │ ├── causal_norm_32.yaml │ │ ├── cls_lws.yaml │ │ └── feat_unifrom.yaml │ ├── ImageNet_LT │ │ ├── causal_norm.yaml │ │ └── cross_entropy.yaml │ └── Places_LT │ │ └── causal_norm.yaml ├── data │ ├── ClassAwareSampler.py │ ├── ClassPrioritySampler.py │ ├── ImageNet_LT │ │ ├── ImageNet_LT_test.txt │ │ ├── ImageNet_LT_train.txt │ │ └── ImageNet_LT_val.txt │ ├── ImbalanceCIFAR.py │ ├── MixedPrioritizedSampler.py │ ├── Places_LT │ │ ├── Places_LT_test.txt │ │ ├── Places_LT_train.txt │ │ └── Places_LT_val.txt │ ├── checkpoints │ │ └── final_model_checkpoint.pth │ └── dataloader.py ├── imagenet-lt.png ├── logger.py ├── long-tailed-cifar.png ├── loss │ ├── FocalLoss.py │ ├── SoftmaxLoss.py │ └── WeightedSoftmaxLoss.py ├── main.py ├── models │ ├── CausalNormClassifier.py │ ├── DotProductClassifier.py │ ├── ResNet101Feature.py │ ├── ResNet152Feature.py │ ├── ResNet32Feature.py │ ├── ResNet50Feature.py │ ├── ResNetFeature.py │ ├── ResNext101Feature.py │ ├── ResNext152Feature.py │ ├── ResNext50Feature.py │ ├── ResNextFeature.py │ └── TauNormClassifier.py ├── run_networks.py └── utils.py ├── long-tailed.png ├── lvis1.0 ├── .dev_scripts │ ├── benchmark_filter.py │ ├── gather_models.py │ └── linter.sh ├── .github │ ├── CODE_OF_CONDUCT.md │ ├── CONTRIBUTING.md │ ├── ISSUE_TEMPLATE │ │ ├── config.yml │ │ ├── error-report.md │ │ ├── feature_request.md │ │ ├── general_questions.md │ │ └── reimplementation_questions.md │ └── workflows │ │ ├── build.yml │ │ └── deploy.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── LICENSE ├── README.md ├── bg-fix-table.png ├── bg-fix.png ├── configs │ ├── _base_ │ │ ├── datasets │ │ │ ├── cityscapes_detection.py │ │ │ ├── cityscapes_instance.py │ │ │ ├── coco_detection.py │ │ │ ├── coco_instance.py │ │ │ ├── coco_instance_semantic.py │ │ │ ├── deepfashion.py │ │ │ ├── lvis_v0.5_instance.py │ │ │ ├── lvis_v1_instance.py │ │ │ ├── voc0712.py │ │ │ └── wider_face.py │ │ ├── default_runtime.py │ │ ├── models │ │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ │ ├── cascade_rcnn_r50_fpn.py │ │ │ ├── fast_rcnn_r50_fpn.py │ │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ │ ├── faster_rcnn_r50_fpn.py │ │ │ ├── mask_rcnn_r50_caffe_c4.py │ │ │ ├── mask_rcnn_r50_fpn.py │ │ │ ├── retinanet_r50_fpn.py │ │ │ ├── rpn_r50_caffe_c4.py │ │ │ ├── rpn_r50_fpn.py │ │ │ └── ssd300.py │ │ └── schedules │ │ │ ├── schedule_1x.py │ │ │ ├── schedule_20e.py │ │ │ └── schedule_2x.py │ ├── albu_example │ │ ├── README.md │ │ └── mask_rcnn_r50_fpn_albu_1x_coco.py │ ├── atss │ │ ├── README.md │ │ ├── atss_r101_fpn_1x_coco.py │ │ └── atss_r50_fpn_1x_coco.py │ ├── carafe │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py │ │ └── mask_rcnn_r50_fpn_carafe_1x_coco.py │ ├── cascade_rcnn │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py │ │ ├── 
cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py │ │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_20e_coco.py │ │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_20e_coco.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── cascade_rcnn_x101_64x4d_fpn_20e_coco.py │ ├── cityscapes │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py │ ├── cornernet │ │ ├── README.md │ │ ├── cornernet_hourglass104_mstest_10x5_210e_coco.py │ │ ├── cornernet_hourglass104_mstest_32x3_210e_coco.py │ │ └── cornernet_hourglass104_mstest_8x6_210e_coco.py │ ├── dcn │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ └── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ ├── deepfashion │ │ ├── README.md │ │ └── mask_rcnn_r50_fpn_15e_deepfashion.py │ ├── detectors │ │ ├── README.md │ │ ├── cascade_rcnn_r50_rfp_1x_coco.py │ │ ├── cascade_rcnn_r50_sac_1x_coco.py │ │ ├── detectors_cascade_rcnn_r50_1x_coco.py │ │ ├── detectors_htc_r50_1x_coco.py │ │ ├── htc_r50_rfp_1x_coco.py │ │ └── htc_r50_sac_1x_coco.py │ ├── double_heads │ │ ├── README.md │ │ └── dh_faster_rcnn_r50_fpn_1x_coco.py │ ├── dynamic_rcnn │ │ ├── README.md │ │ └── dynamic_rcnn_r50_fpn_1x.py │ ├── empirical_attention │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py │ │ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py │ ├── fast_rcnn │ │ ├── README.md │ │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── fast_rcnn_r101_fpn_1x_coco.py │ │ ├── fast_rcnn_r101_fpn_2x_coco.py │ │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── fast_rcnn_r50_fpn_1x_coco.py │ │ └── fast_rcnn_r50_fpn_2x_coco.py │ ├── faster_rcnn │ │ ├── README.md │ │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_2x_coco.py │ │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ ├── faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py │ │ ├── faster_rcnn_r50_fpn_1x_coco-person.py │ │ ├── faster_rcnn_r50_fpn_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_2x_coco.py │ │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py │ │ ├── 
faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── faster_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── fcos │ │ ├── README.md │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_4x4_1x_coco.py │ │ ├── fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_r50_caffe_fpn_4x4_1x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ └── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_4x2_2x_coco.py │ ├── foveabox │ │ ├── README.md │ │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py │ │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py │ │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fovea_r101_fpn_4x4_1x_coco.py │ │ ├── fovea_r101_fpn_4x4_2x_coco.py │ │ ├── fovea_r50_fpn_4x4_1x_coco.py │ │ └── fovea_r50_fpn_4x4_2x_coco.py │ ├── fp16 │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py │ │ └── retinanet_r50_fpn_fp16_1x_coco.py │ ├── free_anchor │ │ ├── README.md │ │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py │ │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py │ │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py │ ├── fsaf │ │ ├── README.md │ │ ├── fsaf_r101_fpn_1x_coco.py │ │ ├── fsaf_r50_fpn_1x_coco.py │ │ └── fsaf_x101_64x4d_fpn_1x_coco.py │ ├── gcnet │ │ ├── README.md │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ └── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ ├── gfl │ │ ├── README.md │ │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ ├── gfl_r101_fpn_mstrain_2x_coco.py │ │ ├── gfl_r50_fpn_1x_coco.py │ │ ├── gfl_r50_fpn_mstrain_2x_coco.py │ │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ └── gfl_x101_32x4d_fpn_mstrain_2x_coco.py │ ├── ghm │ │ ├── README.md │ │ ├── retinanet_ghm_r101_fpn_1x_coco.py │ │ ├── retinanet_ghm_r50_fpn_1x_coco.py │ │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py │ │ └── 
retinanet_ghm_x101_64x4d_fpn_1x_coco.py │ ├── gn+ws │ │ ├── README.md │ │ ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ └── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py │ ├── gn │ │ ├── README.md │ │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py │ │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py │ │ └── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py │ ├── grid_rcnn │ │ ├── README.md │ │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py │ │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py │ │ └── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py │ ├── groie │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py │ │ └── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ ├── guided_anchoring │ │ ├── README.md │ │ ├── ga_fast_r50_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r101_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r50_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r50_fpn_1x_coco.py │ │ ├── ga_faster_x101_32x4d_fpn_1x_coco.py │ │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py │ │ ├── ga_retinanet_r101_caffe_fpn_1x_coco.py │ │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py │ │ ├── ga_retinanet_r50_fpn_1x_coco.py │ │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py │ │ ├── ga_rpn_r101_caffe_fpn_1x_coco.py │ │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py │ │ ├── ga_rpn_r50_fpn_1x_coco.py │ │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py │ │ └── ga_rpn_x101_64x4d_fpn_1x_coco.py │ ├── hrnet │ │ ├── README.md │ │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py │ │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py │ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── htc_hrnetv2p_w18_20e_coco.py │ │ ├── htc_hrnetv2p_w32_20e_coco.py │ │ ├── htc_hrnetv2p_w40_20e_coco.py │ │ ├── htc_hrnetv2p_w40_28e_coco.py │ │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py │ │ ├── 
mask_rcnn_hrnetv2p_w18_1x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py │ │ └── mask_rcnn_hrnetv2p_w40_2x_coco.py │ ├── htc │ │ ├── README.md │ │ ├── htc_r101_fpn_20e_causal.py │ │ ├── htc_r101_fpn_20e_coco.py │ │ ├── htc_r50_fpn_1x_coco.py │ │ ├── htc_r50_fpn_20e_coco.py │ │ ├── htc_without_semantic_r50_fpn_1x_coco.py │ │ ├── htc_without_semantic_r50_fpn_1x_lvis.py │ │ ├── htc_x101_32x4d_fpn_16x1_20e_coco.py │ │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py │ │ ├── htc_x101_64x4d_fpn_20e_16gpu_causal.py │ │ └── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py │ ├── instaboost │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ └── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ ├── legacy_1.x │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py │ │ ├── retinanet_r50_fpn_1x_coco_v1.py │ │ └── ssd300_coco_v1.py │ ├── libra_rcnn │ │ ├── README.md │ │ ├── libra_fast_rcnn_r50_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── libra_retinanet_r50_fpn_1x_coco.py │ ├── lvis │ │ ├── README.md │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ ├── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ ├── mask_rcnn │ │ ├── README.md │ │ ├── mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_2x_coco.py │ │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py │ │ ├── mask_rcnn_r50_fpn_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py │ │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── mask_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── ms_rcnn │ │ ├── README.md │ │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py │ │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py │ │ ├── ms_rcnn_r50_fpn_1x_coco.py │ │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── ms_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── nas_fcos │ │ ├── README.md │ │ ├── 
nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── nas_fpn │ │ ├── README.md │ │ ├── retinanet_r50_fpn_crop640_50e_coco.py │ │ └── retinanet_r50_nasfpn_crop640_50e_coco.py │ ├── paa │ │ ├── README.md │ │ ├── paa_r101_fpn_1x_coco.py │ │ ├── paa_r101_fpn_2x_coco.py │ │ ├── paa_r50_fpn_1.5x_coco.py │ │ ├── paa_r50_fpn_1x_coco.py │ │ └── paa_r50_fpn_2x_coco.py │ ├── pafpn │ │ ├── README.md │ │ └── faster_rcnn_r50_pafpn_1x_coco.py │ ├── pascal_voc │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ ├── retinanet_r50_fpn_1x_voc0712.py │ │ ├── ssd300_voc0712.py │ │ └── ssd512_voc0712.py │ ├── pisa │ │ ├── README.md │ │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py │ │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py │ │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_retinanet_r50_fpn_1x_coco.py │ │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_ssd300_coco.py │ │ └── pisa_ssd512_coco.py │ ├── point_rend │ │ ├── README.md │ │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py │ │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py │ ├── regnet │ │ ├── README.md │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py │ │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py │ │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py │ │ └── retinanet_regnetx-800MF_fpn_1x_coco.py │ ├── reppoints │ │ ├── README.md │ │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py │ │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints.png │ │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_r50_fpn_1x_coco.py │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py │ ├── res2net │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py │ │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py │ │ ├── faster_rcnn_r2_101_fpn_2x_coco.py │ │ ├── htc_r2_101_fpn_20e_coco.py │ │ └── mask_rcnn_r2_101_fpn_2x_coco.py │ ├── retinanet │ │ ├── README.md │ │ ├── retinanet_r101_caffe_fpn_1x_coco.py │ │ ├── retinanet_r101_fpn_1x_coco.py │ │ ├── retinanet_r101_fpn_2x_coco.py │ │ ├── retinanet_r50_caffe_fpn_1x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py │ │ ├── retinanet_r50_fpn_1x_coco.py │ │ ├── retinanet_r50_fpn_2x_coco.py │ │ ├── retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── retinanet_x101_32x4d_fpn_2x_coco.py │ │ ├── retinanet_x101_64x4d_fpn_1x_coco.py │ │ └── retinanet_x101_64x4d_fpn_2x_coco.py │ ├── rpn │ │ ├── README.md │ │ ├── rpn_r101_caffe_fpn_1x_coco.py │ │ ├── rpn_r101_fpn_1x_coco.py │ │ ├── rpn_r101_fpn_2x_coco.py │ │ ├── rpn_r50_caffe_c4_1x_coco.py │ │ ├── rpn_r50_caffe_fpn_1x_coco.py │ │ ├── 
rpn_r50_fpn_1x_coco.py │ │ ├── rpn_r50_fpn_2x_coco.py │ │ ├── rpn_x101_32x4d_fpn_1x_coco.py │ │ ├── rpn_x101_32x4d_fpn_2x_coco.py │ │ ├── rpn_x101_64x4d_fpn_1x_coco.py │ │ └── rpn_x101_64x4d_fpn_2x_coco.py │ ├── sabl │ │ ├── README.md │ │ ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py │ │ ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py │ │ ├── sabl_faster_rcnn_r101_fpn_1x_coco.py │ │ ├── sabl_faster_rcnn_r50_fpn_1x_coco.py │ │ ├── sabl_retinanet_r101_fpn_1x_coco.py │ │ ├── sabl_retinanet_r101_fpn_gn_1x_coco.py │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py │ │ ├── sabl_retinanet_r50_fpn_1x_coco.py │ │ └── sabl_retinanet_r50_fpn_gn_1x_coco.py │ ├── scratch │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ └── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ ├── ssd │ │ ├── README.md │ │ ├── ssd300_coco.py │ │ └── ssd512_coco.py │ ├── wider_face │ │ ├── README.md │ │ └── ssd300_wider_face.py │ └── yolo │ │ ├── README.md │ │ ├── yolov3_d53_320_273e_coco.py │ │ ├── yolov3_d53_mstrain-416_273e_coco.py │ │ └── yolov3_d53_mstrain-608_273e_coco.py ├── demo │ ├── MMDet_Tutorial.ipynb │ ├── demo.jpg │ ├── image_demo.py │ ├── inference_demo.ipynb │ └── webcam_demo.py ├── docker │ └── Dockerfile ├── docs │ ├── Makefile │ ├── api.rst │ ├── changelog.md │ ├── compatibility.md │ ├── conf.py │ ├── config.md │ ├── getting_started.md │ ├── index.rst │ ├── install.md │ ├── make.bat │ ├── model_zoo.md │ ├── projects.md │ ├── robustness_benchmarking.md │ ├── trouble_shooting.md │ └── tutorials │ │ ├── data_pipeline.md │ │ ├── finetune.md │ │ ├── index.rst │ │ ├── new_dataset.md │ │ └── new_modules.md ├── lvis.png ├── mmdet │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── anchor │ │ │ ├── __init__.py │ │ │ ├── anchor_generator.py │ │ │ ├── builder.py │ │ │ ├── point_generator.py │ │ │ └── utils.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ ├── assign_result.py │ │ │ │ ├── atss_assigner.py │ │ │ │ ├── base_assigner.py │ │ │ │ ├── center_region_assigner.py │ │ │ │ ├── grid_assigner.py │ │ │ │ ├── max_iou_assigner.py │ │ │ │ └── point_assigner.py │ │ │ ├── builder.py │ │ │ ├── coder │ │ │ │ ├── __init__.py │ │ │ │ ├── base_bbox_coder.py │ │ │ │ ├── bucketing_bbox_coder.py │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ ├── tblr_bbox_coder.py │ │ │ │ └── yolo_bbox_coder.py │ │ │ ├── demodata.py │ │ │ ├── iou_calculators │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── iou2d_calculator.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── base_sampler.py │ │ │ │ ├── combined_sampler.py │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ ├── ohem_sampler.py │ │ │ │ ├── pseudo_sampler.py │ │ │ │ ├── random_sampler.py │ │ │ │ ├── sampling_result.py │ │ │ │ └── score_hlr_sampler.py │ │ │ └── transforms.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── bbox_overlaps.py │ │ │ ├── class_names.py │ │ │ ├── eval_hooks.py │ │ │ ├── mean_ap.py │ │ │ └── recall.py │ │ ├── fp16 │ │ │ ├── __init__.py │ │ │ ├── decorators.py │ │ │ ├── hooks.py │ │ │ └── utils.py │ │ ├── mask │ │ │ ├── __init__.py │ │ │ ├── mask_target.py │ │ │ ├── structures.py │ │ │ └── utils.py │ │ ├── post_processing │ │ │ ├── __init__.py │ │ │ ├── bbox_nms.py │ │ │ └── 
merge_augs.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── dist_utils.py │ │ │ └── misc.py │ ├── datasets │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── cityscapes.py │ │ ├── coco.py │ │ ├── custom.py │ │ ├── dataset_wrappers.py │ │ ├── deepfashion.py │ │ ├── lvis.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── auto_augment.py │ │ │ ├── compose.py │ │ │ ├── formating.py │ │ │ ├── instaboost.py │ │ │ ├── loading.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ ├── distributed_sampler.py │ │ │ └── group_sampler.py │ │ ├── utils.py │ │ ├── voc.py │ │ ├── wider_face.py │ │ └── xml_style.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── darknet.py │ │ │ ├── detectors_resnet.py │ │ │ ├── detectors_resnext.py │ │ │ ├── hourglass.py │ │ │ ├── hrnet.py │ │ │ ├── regnet.py │ │ │ ├── res2net.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ └── ssd_vgg.py │ │ ├── builder.py │ │ ├── dense_heads │ │ │ ├── __init__.py │ │ │ ├── anchor_free_head.py │ │ │ ├── anchor_head.py │ │ │ ├── atss_head.py │ │ │ ├── base_dense_head.py │ │ │ ├── corner_head.py │ │ │ ├── fcos_head.py │ │ │ ├── fovea_head.py │ │ │ ├── free_anchor_retina_head.py │ │ │ ├── fsaf_head.py │ │ │ ├── ga_retina_head.py │ │ │ ├── ga_rpn_head.py │ │ │ ├── gfl_head.py │ │ │ ├── guided_anchor_head.py │ │ │ ├── nasfcos_head.py │ │ │ ├── paa_head.py │ │ │ ├── pisa_retinanet_head.py │ │ │ ├── pisa_ssd_head.py │ │ │ ├── reppoints_head.py │ │ │ ├── retina_head.py │ │ │ ├── retina_sepbn_head.py │ │ │ ├── rpn_head.py │ │ │ ├── rpn_test_mixin.py │ │ │ ├── sabl_retina_head.py │ │ │ ├── ssd_head.py │ │ │ └── yolo_head.py │ │ ├── detectors │ │ │ ├── __init__.py │ │ │ ├── atss.py │ │ │ ├── base.py │ │ │ ├── cascade_rcnn.py │ │ │ ├── cornernet.py │ │ │ ├── fast_rcnn.py │ │ │ ├── faster_rcnn.py │ │ │ ├── fcos.py │ │ │ ├── fovea.py │ │ │ ├── fsaf.py │ │ │ ├── gfl.py │ │ │ ├── grid_rcnn.py │ │ │ ├── htc.py │ │ │ ├── mask_rcnn.py │ │ │ ├── mask_scoring_rcnn.py │ │ │ ├── nasfcos.py │ │ │ ├── paa.py │ │ │ ├── point_rend.py │ │ │ ├── reppoints_detector.py │ │ │ ├── retinanet.py │ │ │ ├── rpn.py │ │ │ ├── single_stage.py │ │ │ ├── two_stage.py │ │ │ └── yolo.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── ae_loss.py │ │ │ ├── balanced_l1_loss.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── focal_loss.py │ │ │ ├── gaussian_focal_loss.py │ │ │ ├── gfocal_loss.py │ │ │ ├── ghm_loss.py │ │ │ ├── iou_loss.py │ │ │ ├── mse_loss.py │ │ │ ├── pisa_loss.py │ │ │ ├── smooth_l1_loss.py │ │ │ └── utils.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ ├── bfp.py │ │ │ ├── fpn.py │ │ │ ├── fpn_carafe.py │ │ │ ├── hrfpn.py │ │ │ ├── nas_fpn.py │ │ │ ├── nasfcos_fpn.py │ │ │ ├── pafpn.py │ │ │ ├── rfp.py │ │ │ └── yolo_neck.py │ │ ├── roi_heads │ │ │ ├── __init__.py │ │ │ ├── base_roi_head.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ ├── double_bbox_head.py │ │ │ │ └── sabl_head.py │ │ │ ├── cascade_roi_head.py │ │ │ ├── double_roi_head.py │ │ │ ├── dynamic_roi_head.py │ │ │ ├── grid_roi_head.py │ │ │ ├── htc_roi_head.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── coarse_mask_head.py │ │ │ │ ├── fcn_mask_head.py │ │ │ │ ├── fused_semantic_head.py │ │ │ │ ├── grid_head.py │ │ │ │ ├── htc_mask_head.py │ │ │ │ ├── mask_point_head.py │ │ │ │ └── maskiou_head.py │ │ │ ├── mask_scoring_roi_head.py │ │ │ ├── pisa_roi_head.py │ │ │ ├── point_rend_roi_head.py │ │ │ ├── roi_extractors │ │ │ │ ├── 
__init__.py │ │ │ │ ├── base_roi_extractor.py │ │ │ │ ├── generic_roi_extractor.py │ │ │ │ └── single_level_roi_extractor.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── res_layer.py │ │ │ ├── standard_roi_head.py │ │ │ └── test_mixins.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── gaussian_target.py │ │ │ └── res_layer.py │ ├── ops │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ ├── contextmanagers.py │ │ ├── logger.py │ │ ├── profiling.py │ │ └── util_mixins.py │ └── version.py ├── pytest.ini ├── requirements.txt ├── requirements │ ├── build.txt │ ├── docs.txt │ ├── optional.txt │ ├── readthedocs.txt │ ├── runtime.txt │ └── tests.txt ├── resources │ ├── coco_test_12510.jpg │ ├── corruptions_sev_3.png │ ├── data_pipeline.png │ ├── loss_curve.png │ └── mmdet-logo.png ├── setup.cfg ├── setup.py ├── tests │ ├── async_benchmark.py │ ├── test_anchor.py │ ├── test_assigner.py │ ├── test_async.py │ ├── test_coder.py │ ├── test_config.py │ ├── test_data │ │ ├── test_dataset.py │ │ ├── test_formatting.py │ │ ├── test_loading.py │ │ ├── test_models_aug_test.py │ │ ├── test_sampler.py │ │ ├── test_transform.py │ │ └── test_utils.py │ ├── test_fp16.py │ ├── test_masks.py │ ├── test_models │ │ ├── test_backbones.py │ │ ├── test_forward.py │ │ ├── test_heads.py │ │ ├── test_losses.py │ │ ├── test_necks.py │ │ ├── test_pisa_heads.py │ │ └── test_roi_extractor.py │ └── test_version.py └── tools │ ├── analyze_logs.py │ ├── benchmark.py │ ├── browse_dataset.py │ ├── coco_error_analysis.py │ ├── convert_datasets │ ├── cityscapes.py │ └── pascal_voc.py │ ├── detectron2pytorch.py │ ├── dist_test.sh │ ├── dist_train.sh │ ├── eval_metric.py │ ├── get_flops.py │ ├── print_config.py │ ├── publish_model.py │ ├── pytorch2onnx.py │ ├── regnet2mmdet.py │ ├── robustness_eval.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── test.py │ ├── test_robustness.py │ ├── train.py │ └── upgrade_model_version.py └── lvis_old ├── .github ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md └── ISSUE_TEMPLATE │ ├── config.yml │ ├── error-report.md │ ├── feature_request.md │ └── general_questions.md ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .style.yapf ├── .travis.yml ├── LICENSE ├── README.md ├── configs ├── albu_example │ └── mask_rcnn_r50_fpn_1x.py ├── atss │ ├── README.md │ └── atss_r50_fpn_1x.py ├── carafe │ ├── README.md │ ├── faster_rcnn_r50_fpn_carafe_1x.py │ └── mask_rcnn_r50_fpn_carafe_1x.py ├── cascade_mask_rcnn_r101_fpn_1x.py ├── cascade_mask_rcnn_r50_caffe_c4_1x.py ├── cascade_mask_rcnn_r50_fpn_1x.py ├── cascade_mask_rcnn_x101_32x4d_fpn_1x.py ├── cascade_mask_rcnn_x101_64x4d_fpn_1x.py ├── cascade_rcnn_r101_fpn_1x.py ├── cascade_rcnn_r50_caffe_c4_1x.py ├── cascade_rcnn_r50_fpn_1x.py ├── cascade_rcnn_x101_32x4d_fpn_1x.py ├── cascade_rcnn_x101_64x4d_fpn_1x.py ├── cityscapes │ ├── README.md │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ └── mask_rcnn_r50_fpn_1x_cityscapes.py ├── dcn │ ├── README.md │ ├── cascade_mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ ├── cascade_rcnn_dconv_c3-c5_r50_fpn_1x.py │ ├── faster_rcnn_dconv_c3-c5_r50_fpn_1x.py │ ├── faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x.py │ ├── faster_rcnn_dpool_r50_fpn_1x.py │ ├── faster_rcnn_mdconv_c3-c5_group4_r50_fpn_1x.py │ ├── faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py │ ├── faster_rcnn_mdpool_r50_fpn_1x.py │ ├── mask_rcnn_dconv_c3-c5_r50_fpn_1x.py │ └── mask_rcnn_mdconv_c3-c5_r50_fpn_1x.py ├── double_heads │ └── dh_faster_rcnn_r50_fpn_1x.py ├── empirical_attention │ ├── README.md │ ├── 
faster_rcnn_r50_fpn_attention_0010_1x.py │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x.py │ ├── faster_rcnn_r50_fpn_attention_1111_1x.py │ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x.py ├── fast_mask_rcnn_r101_fpn_1x.py ├── fast_mask_rcnn_r50_caffe_c4_1x.py ├── fast_mask_rcnn_r50_fpn_1x.py ├── fast_rcnn_r101_fpn_1x.py ├── fast_rcnn_r50_caffe_c4_1x.py ├── fast_rcnn_r50_fpn_1x.py ├── faster_rcnn_ohem_r50_fpn_1x.py ├── faster_rcnn_r101_fpn_1x.py ├── faster_rcnn_r50_caffe_c4_1x.py ├── faster_rcnn_r50_fpn_1x.py ├── faster_rcnn_x101_32x4d_fpn_1x.py ├── faster_rcnn_x101_64x4d_fpn_1x.py ├── fcos │ ├── README.md │ ├── fcos_center_r50_caffe_fpn_gn_1x_4gpu.py.py │ ├── fcos_mstrain_640_800_r101_caffe_fpn_gn_2x_4gpu.py │ ├── fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x.py │ └── fcos_r50_caffe_fpn_gn_1x_4gpu.py ├── foveabox │ ├── README.md │ ├── fovea_align_gn_ms_r101_fpn_4gpu_2x.py │ ├── fovea_align_gn_ms_r50_fpn_4gpu_2x.py │ ├── fovea_align_gn_r101_fpn_4gpu_2x.py │ ├── fovea_align_gn_r50_fpn_4gpu_2x.py │ └── fovea_r50_fpn_4gpu_1x.py ├── fp16 │ ├── faster_rcnn_r50_fpn_fp16_1x.py │ ├── mask_rcnn_r50_fpn_fp16_1x.py │ └── retinanet_r50_fpn_fp16_1x.py ├── free_anchor │ ├── README.md │ ├── retinanet_free_anchor_r101_fpn_1x.py │ ├── retinanet_free_anchor_r50_fpn_1x.py │ └── retinanet_free_anchor_x101-32x4d_fpn_1x.py ├── gcnet │ ├── README.md │ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_1x.py │ ├── mask_rcnn_r16_gcb_c3-c5_r50_fpn_syncbn_1x.py │ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_1x.py │ ├── mask_rcnn_r4_gcb_c3-c5_r50_fpn_syncbn_1x.py │ └── mask_rcnn_r50_fpn_sbn_1x.py ├── ghm │ ├── README.md │ └── retinanet_ghm_r50_fpn_1x.py ├── gn+ws │ ├── README.md │ ├── faster_rcnn_r50_fpn_gn_ws_1x.py │ ├── mask_rcnn_r50_fpn_gn_ws_20_23_24e.py │ ├── mask_rcnn_r50_fpn_gn_ws_2x.py │ └── mask_rcnn_x101_32x4d_fpn_gn_ws_2x.py ├── gn │ ├── README.md │ ├── mask_rcnn_r101_fpn_gn_2x.py │ ├── mask_rcnn_r50_fpn_gn_2x.py │ └── mask_rcnn_r50_fpn_gn_contrib_2x.py ├── grid_rcnn │ ├── README.md │ ├── grid_rcnn_gn_head_r50_fpn_2x.py │ └── grid_rcnn_gn_head_x101_32x4d_fpn_2x.py ├── guided_anchoring │ ├── README.md │ ├── ga_fast_r50_caffe_fpn_1x.py │ ├── ga_faster_r50_caffe_fpn_1x.py │ ├── ga_faster_x101_32x4d_fpn_1x.py │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ ├── ga_retinanet_r50_caffe_fpn_1x.py │ ├── ga_retinanet_x101_32x4d_fpn_1x.py │ ├── ga_rpn_r101_caffe_rpn_1x.py │ ├── ga_rpn_r50_caffe_fpn_1x.py │ └── ga_rpn_x101_32x4d_fpn_1x.py ├── hrnet │ ├── README.md │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e.py │ ├── cascade_rcnn_hrnetv2p_w32_20e.py │ ├── faster_rcnn_hrnetv2p_w18_1x.py │ ├── faster_rcnn_hrnetv2p_w32_1x.py │ ├── faster_rcnn_hrnetv2p_w40_1x.py │ ├── fcos_hrnetv2p_w32_gn_1x_4gpu.py │ ├── htc_hrnetv2p_w32_20e.py │ ├── mask_rcnn_hrnetv2p_w18_1x.py │ └── mask_rcnn_hrnetv2p_w32_1x.py ├── htc │ ├── README.md │ ├── htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py │ ├── htc_r101_fpn_20e_causal.py │ ├── htc_r50_fpn_1x.py │ ├── htc_r50_fpn_20e.py │ ├── htc_without_semantic_r50_fpn_1x.py │ ├── htc_x101_32x4d_fpn_20e_16gpu.py │ ├── htc_x101_32x4d_fpn_20e_16gpu_example.py │ ├── htc_x101_64x4d_fpn_20e_16gpu.py │ ├── htc_x101_64x4d_fpn_20e_16gpu_causal.py │ └── htc_x101_64x4d_fpn_20e_16gpu_causal_finetune.py ├── instaboost │ ├── README.md │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x.py │ ├── mask_rcnn_r50_fpn_instaboost_4x.py │ └── ssd300_coco_instaboost_4x.py ├── libra_rcnn │ ├── README.md │ ├── libra_fast_rcnn_r50_fpn_1x.py │ ├── libra_faster_rcnn_r101_fpn_1x.py │ ├── libra_faster_rcnn_r50_fpn_1x.py │ ├── 
libra_faster_rcnn_x101_64x4d_fpn_1x.py │ └── libra_retinanet_r50_fpn_1x.py ├── mask_rcnn_r101_fpn_1x.py ├── mask_rcnn_r50_caffe_c4_1x.py ├── mask_rcnn_r50_fpn_1x.py ├── mask_rcnn_x101_32x4d_fpn_1x.py ├── mask_rcnn_x101_64x4d_fpn_1x.py ├── ms_rcnn │ ├── README.md │ ├── ms_rcnn_r101_caffe_fpn_1x.py │ ├── ms_rcnn_r50_caffe_fpn_1x.py │ └── ms_rcnn_x101_64x4d_fpn_1x.py ├── nas_fpn │ ├── README.md │ ├── retinanet_crop640_r50_fpn_50e.py │ └── retinanet_crop640_r50_nasfpn_50e.py ├── pascal_voc │ ├── README.md │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ ├── ssd300_voc.py │ └── ssd512_voc.py ├── reppoints │ ├── README.md │ ├── bbox_r50_grid_center_fpn_1x.py │ ├── bbox_r50_grid_fpn_1x.py │ ├── reppoints.png │ ├── reppoints_minmax_r50_fpn_1x.py │ ├── reppoints_moment_r101_dcn_fpn_2x.py │ ├── reppoints_moment_r101_dcn_fpn_2x_mt.py │ ├── reppoints_moment_r101_fpn_2x.py │ ├── reppoints_moment_r101_fpn_2x_mt.py │ ├── reppoints_moment_r50_fpn_1x.py │ ├── reppoints_moment_r50_fpn_2x.py │ ├── reppoints_moment_r50_fpn_2x_mt.py │ ├── reppoints_moment_r50_no_gn_fpn_1x.py │ ├── reppoints_moment_x101_dcn_fpn_2x.py │ ├── reppoints_moment_x101_dcn_fpn_2x_mt.py │ └── reppoints_partial_minmax_r50_fpn_1x.py ├── retinanet_r101_fpn_1x.py ├── retinanet_r50_fpn_1x.py ├── retinanet_x101_32x4d_fpn_1x.py ├── retinanet_x101_64x4d_fpn_1x.py ├── rpn_r101_fpn_1x.py ├── rpn_r50_caffe_c4_1x.py ├── rpn_r50_fpn_1x.py ├── rpn_x101_32x4d_fpn_1x.py ├── rpn_x101_64x4d_fpn_1x.py ├── scratch │ ├── README.md │ ├── scratch_faster_rcnn_r50_fpn_gn_6x.py │ └── scratch_mask_rcnn_r50_fpn_gn_6x.py ├── ssd300_coco.py ├── ssd512_coco.py └── wider_face │ ├── README.md │ └── ssd300_wider_face.py ├── demo ├── coco_test_12510.jpg ├── corruptions_sev_3.png ├── data_pipeline.png ├── demo.jpg ├── inference_demo.ipynb ├── loss_curve.png └── webcam_demo.py ├── docker └── Dockerfile ├── docs ├── CHANGELOG.md ├── GETTING_STARTED.md ├── INSTALL.md ├── MODEL_ZOO.md ├── Makefile ├── ROBUSTNESS_BENCHMARKING.md ├── TECHNICAL_DETAILS.md ├── conf.py ├── index.rst ├── make.bat └── requirements.txt ├── mmdet ├── __init__.py ├── apis │ ├── __init__.py │ ├── inference.py │ ├── test.py │ └── train.py ├── core │ ├── __init__.py │ ├── anchor │ │ ├── __init__.py │ │ ├── anchor_generator.py │ │ ├── anchor_target.py │ │ ├── guided_anchor_target.py │ │ ├── point_generator.py │ │ └── point_target.py │ ├── bbox │ │ ├── __init__.py │ │ ├── assign_sampling.py │ │ ├── assigners │ │ │ ├── __init__.py │ │ │ ├── approx_max_iou_assigner.py │ │ │ ├── assign_result.py │ │ │ ├── atss_assigner.py │ │ │ ├── base_assigner.py │ │ │ ├── max_iou_assigner.py │ │ │ └── point_assigner.py │ │ ├── bbox_target.py │ │ ├── demodata.py │ │ ├── geometry.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ ├── base_sampler.py │ │ │ ├── combined_sampler.py │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ ├── ohem_sampler.py │ │ │ ├── pseudo_sampler.py │ │ │ ├── random_sampler.py │ │ │ └── sampling_result.py │ │ └── transforms.py │ ├── evaluation │ │ ├── __init__.py │ │ ├── bbox_overlaps.py │ │ ├── class_names.py │ │ ├── eval_hooks.py │ │ ├── mean_ap.py │ │ └── recall.py │ ├── fp16 │ │ ├── __init__.py │ │ ├── decorators.py │ │ ├── hooks.py │ │ └── utils.py │ ├── mask │ │ ├── __init__.py │ │ ├── mask_target.py │ │ └── utils.py │ ├── optimizer │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── copy_of_sgd.py │ │ └── registry.py │ ├── post_processing │ │ ├── __init__.py │ │ ├── bbox_nms.py │ │ └── merge_augs.py │ └── utils │ │ ├── __init__.py │ │ ├── dist_utils.py │ │ └── misc.py ├── 
datasets │ ├── LVIS.py │ ├── LVIS_utils.py │ ├── __init__.py │ ├── builder.py │ ├── cityscapes.py │ ├── coco.py │ ├── custom.py │ ├── dataset_wrappers.py │ ├── loader │ │ ├── __init__.py │ │ ├── build_loader.py │ │ └── sampler.py │ ├── pipelines │ │ ├── __init__.py │ │ ├── compose.py │ │ ├── formating.py │ │ ├── instaboost.py │ │ ├── loading.py │ │ ├── test_aug.py │ │ └── transforms.py │ ├── registry.py │ ├── voc.py │ ├── wider_face.py │ └── xml_style.py ├── models │ ├── __init__.py │ ├── anchor_heads │ │ ├── __init__.py │ │ ├── anchor_head.py │ │ ├── atss_head.py │ │ ├── fcos_head.py │ │ ├── fovea_head.py │ │ ├── free_anchor_retina_head.py │ │ ├── ga_retina_head.py │ │ ├── ga_rpn_head.py │ │ ├── guided_anchor_head.py │ │ ├── reppoints_head.py │ │ ├── retina_head.py │ │ ├── retina_sepbn_head.py │ │ ├── rpn_head.py │ │ └── ssd_head.py │ ├── backbones │ │ ├── __init__.py │ │ ├── hrnet.py │ │ ├── resnet.py │ │ ├── resnext.py │ │ └── ssd_vgg.py │ ├── bbox_heads │ │ ├── __init__.py │ │ ├── bbox_head.py │ │ ├── convfc_bbox_head.py │ │ └── double_bbox_head.py │ ├── builder.py │ ├── detectors │ │ ├── __init__.py │ │ ├── atss.py │ │ ├── base.py │ │ ├── cascade_rcnn.py │ │ ├── double_head_rcnn.py │ │ ├── fast_rcnn.py │ │ ├── faster_rcnn.py │ │ ├── fcos.py │ │ ├── fovea.py │ │ ├── grid_rcnn.py │ │ ├── htc.py │ │ ├── mask_rcnn.py │ │ ├── mask_scoring_rcnn.py │ │ ├── reppoints_detector.py │ │ ├── retinanet.py │ │ ├── rpn.py │ │ ├── single_stage.py │ │ ├── test_mixins.py │ │ └── two_stage.py │ ├── losses │ │ ├── __init__.py │ │ ├── accuracy.py │ │ ├── balanced_l1_loss.py │ │ ├── cross_entropy_loss.py │ │ ├── focal_loss.py │ │ ├── ghm_loss.py │ │ ├── iou_loss.py │ │ ├── mse_loss.py │ │ ├── smooth_l1_loss.py │ │ └── utils.py │ ├── mask_heads │ │ ├── __init__.py │ │ ├── fcn_mask_head.py │ │ ├── fused_semantic_head.py │ │ ├── grid_head.py │ │ ├── htc_mask_head.py │ │ └── maskiou_head.py │ ├── necks │ │ ├── __init__.py │ │ ├── bfp.py │ │ ├── fpn.py │ │ ├── fpn_carafe.py │ │ ├── hrfpn.py │ │ └── nas_fpn.py │ ├── plugins │ │ ├── __init__.py │ │ ├── generalized_attention.py │ │ └── non_local.py │ ├── registry.py │ ├── roi_extractors │ │ ├── __init__.py │ │ └── single_level.py │ ├── shared_heads │ │ ├── __init__.py │ │ └── res_layer.py │ └── utils │ │ ├── __init__.py │ │ ├── conv_module.py │ │ ├── conv_ws.py │ │ ├── norm.py │ │ ├── scale.py │ │ ├── upsample.py │ │ └── weight_init.py ├── ops │ ├── __init__.py │ ├── activation.py │ ├── affine_grid │ │ ├── __init__.py │ │ ├── affine_grid.py │ │ └── src │ │ │ └── affine_grid_cuda.cpp │ ├── carafe │ │ ├── __init__.py │ │ ├── carafe.py │ │ ├── grad_check.py │ │ ├── setup.py │ │ └── src │ │ │ ├── carafe_cuda.cpp │ │ │ ├── carafe_cuda_kernel.cu │ │ │ ├── carafe_naive_cuda.cpp │ │ │ └── carafe_naive_cuda_kernel.cu │ ├── context_block.py │ ├── conv.py │ ├── conv_module.py │ ├── conv_ws.py │ ├── dcn │ │ ├── __init__.py │ │ ├── deform_conv.py │ │ ├── deform_pool.py │ │ └── src │ │ │ ├── deform_conv_cuda.cpp │ │ │ ├── deform_conv_cuda_kernel.cu │ │ │ ├── deform_pool_cuda.cpp │ │ │ └── deform_pool_cuda_kernel.cu │ ├── generalized_attention.py │ ├── grid_sampler │ │ ├── __init__.py │ │ ├── grid_sampler.py │ │ └── src │ │ │ ├── cpu │ │ │ ├── grid_sampler_cpu.cpp │ │ │ └── grid_sampler_cpu.h │ │ │ ├── cuda │ │ │ ├── grid_sampler_cuda.cu │ │ │ └── grid_sampler_cuda.cuh │ │ │ ├── cudnn │ │ │ └── grid_sampler_cudnn.cpp │ │ │ └── grid_sampler.cpp │ ├── masked_conv │ │ ├── __init__.py │ │ ├── masked_conv.py │ │ └── src │ │ │ ├── masked_conv2d_cuda.cpp │ │ │ └── 
masked_conv2d_kernel.cu │ ├── nms │ │ ├── __init__.py │ │ ├── nms_wrapper.py │ │ └── src │ │ │ ├── nms_cpu.cpp │ │ │ ├── nms_cuda.cpp │ │ │ └── nms_kernel.cu │ ├── non_local.py │ ├── norm.py │ ├── roi_align │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── roi_align.py │ │ └── src │ │ │ ├── roi_align_cuda.cpp │ │ │ ├── roi_align_kernel.cu │ │ │ └── roi_align_kernel_v2.cu │ ├── roi_pool │ │ ├── __init__.py │ │ ├── gradcheck.py │ │ ├── roi_pool.py │ │ └── src │ │ │ ├── roi_pool_cuda.cpp │ │ │ └── roi_pool_kernel.cu │ ├── scale.py │ ├── sigmoid_focal_loss │ │ ├── __init__.py │ │ ├── sigmoid_focal_loss.py │ │ └── src │ │ │ ├── sigmoid_focal_loss.cpp │ │ │ └── sigmoid_focal_loss_cuda.cu │ ├── upsample.py │ └── utils │ │ ├── __init__.py │ │ └── src │ │ └── compiling_info.cpp └── utils │ ├── __init__.py │ ├── collect_env.py │ ├── contextmanagers.py │ ├── flops_counter.py │ ├── logger.py │ ├── profiling.py │ ├── registry.py │ └── util_mixins.py ├── pytest.ini ├── requirements.txt ├── requirements ├── build.txt ├── optional.txt ├── runtime.txt └── tests.txt ├── setup.py ├── tests ├── async_benchmark.py ├── test_assigner.py ├── test_async.py ├── test_config.py ├── test_forward.py ├── test_heads.py ├── test_nms.py ├── test_sampler.py ├── test_soft_nms.py └── test_utils.py └── tools ├── analyze_logs.py ├── browse_dataset.py ├── coco_error_analysis.py ├── convert_datasets ├── cityscapes.py └── pascal_voc.py ├── detectron2pytorch.py ├── dist_test.sh ├── dist_train.sh ├── fuse_conv_bn.py ├── get_flops.py ├── publish_model.py ├── pytorch2onnx.py ├── robustness_eval.py ├── slurm_test.sh ├── slurm_train.sh ├── test.py ├── test_robustness.py ├── train.py └── upgrade_model_version.py /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [KaihuaTang] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: tkhchipaomg 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: ['https://kaihuatang.github.io/donate'] 16 | -------------------------------------------------------------------------------- /NeurIPS 2020 Presentation - Full (1hr).pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/NeurIPS 2020 Presentation - Full (1hr).pptx -------------------------------------------------------------------------------- /NeurIPS 2020 Presentation - Short (5min).pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/NeurIPS 2020 Presentation - Short (5min).pptx -------------------------------------------------------------------------------- /classification/.gitignore: 
-------------------------------------------------------------------------------- 1 | logs 2 | *pkl 3 | *pyc 4 | .vscode 5 | *.sh 6 | runs 7 | .vscode -------------------------------------------------------------------------------- /classification/data/checkpoints/final_model_checkpoint.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/classification/data/checkpoints/final_model_checkpoint.pth -------------------------------------------------------------------------------- /classification/imagenet-lt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/classification/imagenet-lt.png -------------------------------------------------------------------------------- /classification/long-tailed-cifar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/classification/long-tailed-cifar.png -------------------------------------------------------------------------------- /classification/loss/SoftmaxLoss.py: -------------------------------------------------------------------------------- 1 | """Copyright (c) Facebook, Inc. and its affiliates. 2 | All rights reserved. 3 | 4 | This source code is licensed under the license found in the 5 | LICENSE file in the root directory of this source tree. 6 | 7 | Portions of the source code are from the OLTR project, whose 8 | notice appears below and in LICENSE in the root directory of 9 | this source tree. 10 | 11 | Copyright (c) 2019, Zhongqi Miao 12 | All rights reserved. 13 | """ 14 | 15 | 16 | import torch.nn as nn 17 | 18 | def create_loss(): 19 | print('Loading Softmax Loss.') 20 | return nn.CrossEntropyLoss() 21 | 22 | -------------------------------------------------------------------------------- /long-tailed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/long-tailed.png -------------------------------------------------------------------------------- /lvis1.0/.dev_scripts/linter.sh: -------------------------------------------------------------------------------- 1 | yapf -r -i mmdet/ configs/ tests/ tools/ 2 | isort -rc mmdet/ configs/ tests/ tools/ 3 | flake8 .
4 | -------------------------------------------------------------------------------- /lvis1.0/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | 3 | contact_links: 4 | - name: Common Issues 5 | url: https://mmdetection.readthedocs.io/en/latest/trouble_shooting.html 6 | about: Check if your issue already has solutions 7 | - name: MMDetection Documentation 8 | url: https://mmdetection.readthedocs.io/en/latest/ 9 | about: Check if your question is answered in docs 10 | -------------------------------------------------------------------------------- /lvis1.0/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature** 11 | 12 | **Motivation** 13 | A clear and concise description of the motivation of the feature. 14 | Ex1. It is inconvenient when [....]. 15 | Ex2. There is a recent paper [....], which is very helpful for [....]. 16 | 17 | **Related resources** 18 | If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 22 | If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated. 23 | -------------------------------------------------------------------------------- /lvis1.0/.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /lvis1.0/.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: push 4 | 5 | jobs: 6 | build-n-publish: 7 | runs-on: ubuntu-latest 8 | if: startsWith(github.event.ref, 'refs/tags') 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Set up Python 3.7 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: 3.7 15 | - name: Install torch 16 | run: pip install torch 17 | - name: Install wheel 18 | run: pip install wheel 19 | - name: Build MMDetection 20 | run: python setup.py sdist bdist_wheel 21 | - name: Publish distribution to PyPI 22 | run: | 23 | pip install twine 24 | twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} 25 | -------------------------------------------------------------------------------- /lvis1.0/.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | python: 4 | version: 3.7 5 | install: 6 | - requirements: requirements/docs.txt 7 | - requirements: requirements/readthedocs.txt 8 | -------------------------------------------------------------------------------- /lvis1.0/bg-fix-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/bg-fix-table.png -------------------------------------------------------------------------------- 
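The `_base_` config fragments that follow (the LVIS dataset pipelines, the 1x/20e/2x schedules, and `default_runtime.py`) are building blocks rather than standalone training configs: mmdetection composes them through `_base_` inheritance and then overrides individual fields, which is the same pattern the cascade_rcnn configs further down in this dump use. Below is a minimal sketch of such a composition, assuming the standard mmdetection config mechanism; the composed file itself is hypothetical and not part of this repository, while `ClassBalancedDataset`, `oversample_thr`, `_delete_`, and the LVIS paths mirror the `lvis_v0.5_instance.py` and `lvis_v1_instance.py` entries that follow.

```python
# Hypothetical composed config (not a file in this repository) showing how the
# _base_ fragments dumped below are typically combined via mmdetection's
# config inheritance.
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',  # model definition
    '../_base_/datasets/lvis_v1_instance.py',         # LVIS v1 data pipeline
    '../_base_/schedules/schedule_2x.py',             # 24-epoch step schedule
    '../_base_/default_runtime.py',                   # logging / checkpointing
]
# Override one inherited field: wrap the training set in ClassBalancedDataset,
# as lvis_v0.5_instance.py does, so categories whose image frequency falls
# below oversample_thr are repeated (repeat-factor style oversampling).
# _delete_=True replaces the inherited train dict instead of merging into it.
data = dict(
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type='LVISV1Dataset',
            ann_file='data/LVIS/lvis_v1_train.json',
            img_prefix='data/LVIS/images')))
```

In this layout, swapping the backbone, dataset, or schedule is a one-line change to the `_base_` list plus a small override dict, which is why the cascade_rcnn and other config directories in the tree above contain many near-identical files that differ only in `pretrained`, `backbone`, or the schedule they inherit.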
/lvis1.0/bg-fix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/bg-fix.png -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/datasets/lvis_v0.5_instance.py: -------------------------------------------------------------------------------- 1 | _base_ = 'coco_instance.py' 2 | dataset_type = 'LVISV05Dataset' 3 | data_root = 'data/lvis_v0.5/' 4 | data = dict( 5 | samples_per_gpu=2, 6 | workers_per_gpu=2, 7 | train=dict( 8 | _delete_=True, 9 | type='ClassBalancedDataset', 10 | oversample_thr=1e-3, 11 | dataset=dict( 12 | type=dataset_type, 13 | ann_file=data_root + 'annotations/lvis_v0.5_train.json', 14 | img_prefix=data_root + 'train2017/')), 15 | val=dict( 16 | type=dataset_type, 17 | ann_file=data_root + 'annotations/lvis_v0.5_val.json', 18 | img_prefix=data_root + 'val2017/'), 19 | test=dict( 20 | type=dataset_type, 21 | ann_file=data_root + 'annotations/lvis_v0.5_val.json', 22 | img_prefix=data_root + 'val2017/')) 23 | evaluation = dict(metric=['bbox', 'segm']) 24 | -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/datasets/lvis_v1_instance.py: -------------------------------------------------------------------------------- 1 | _base_ = 'coco_instance.py' 2 | dataset_type = 'LVISV1Dataset' 3 | data_root = 'data/LVIS/' 4 | data = dict( 5 | samples_per_gpu=2, 6 | workers_per_gpu=2, 7 | train=dict( 8 | type=dataset_type, 9 | ann_file=data_root + 'lvis_v1_train.json', 10 | img_prefix=data_root + 'images'), 11 | val=dict( 12 | type=dataset_type, 13 | ann_file=data_root + 'lvis_v1_val.json', 14 | img_prefix=data_root + 'images'), 15 | test=dict( 16 | type=dataset_type, 17 | ann_file=data_root + 'lvis_v1_val.json', 18 | img_prefix=data_root + 'images')) 19 | evaluation = dict(metric=['bbox', 'segm']) 20 | -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | checkpoint_config = dict(interval=1) 2 | # yapf:disable 3 | log_config = dict( 4 | interval=50, 5 | hooks=[ 6 | dict(type='TextLoggerHook'), 7 | # dict(type='TensorboardLoggerHook') 8 | ]) 9 | # yapf:enable 10 | dist_params = dict(backend='nccl') 11 | log_level = 'INFO' 12 | load_from = None 13 | resume_from = None 14 | workflow = [('train', 1)] 15 | -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/schedules/schedule_1x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[8, 11]) 11 | total_epochs = 12 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/schedules/schedule_20e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | 
warmup_ratio=0.001, 10 | step=[16, 19]) 11 | total_epochs = 20 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/_base_/schedules/schedule_2x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[16, 22]) 11 | total_epochs = 24 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/albu_example/README.md: -------------------------------------------------------------------------------- 1 | ## Results and Models 2 | 3 | | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Download | 4 | |:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:--------:| 5 | | R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 |[model](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) | 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/atss/atss_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './atss_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(depth=101), 5 | ) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 
[ 2 | '../_base_/models/cascade_mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | 
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | total_epochs = 20 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | type='CascadeRCNN', 4 | pretrained='open-mmlab://resnext101_64x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=64, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | frozen_stages=1, 13 | norm_cfg=dict(type='BN', requires_grad=True), 14 | style='pytorch')) 15 | -------------------------------------------------------------------------------- /lvis1.0/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | type='CascadeRCNN', 4 | pretrained='open-mmlab://resnext101_64x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=64, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | frozen_stages=1, 13 | norm_cfg=dict(type='BN', requires_grad=True), 14 | style='pytorch')) 15 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_roi_extractor=dict( 5 | type='SingleRoIExtractor', 6 | roi_layer=dict( 7 | _delete_=True, 8 | type='DeformRoIPoolPack', 9 | output_size=7, 10 | output_channels=256), 11 | out_channels=256, 12 | featmap_strides=[4, 8, 16, 32]))) 13 | 
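The faster_rcnn_r50_fpn_dpool_1x_coco.py config above relies on `_delete_=True` so that the new RoI layer replaces, rather than merges with, the RoIAlign layer defined in the `_base_` Faster R-CNN model. A minimal sketch of that merge rule in plain Python (it mimics the mmcv-style recursive config merge for illustration only, it is not mmcv's actual implementation, and the RoIAlign values below are recalled from the standard base config):

```
# Child config dicts normally update the matching _base_ dicts key by key;
# a child dict carrying _delete_=True discards the base dict and is used as-is.
def merge_cfg(base, child):
    if not isinstance(base, dict) or not isinstance(child, dict):
        return child
    child = dict(child)
    if child.pop('_delete_', False):
        return child
    merged = dict(base)
    for key, value in child.items():
        merged[key] = merge_cfg(base[key], value) if key in base else value
    return merged

base_roi_layer = dict(type='RoIAlign', output_size=7, sampling_ratio=0)
override = dict(_delete_=True, type='DeformRoIPoolPack', output_size=7, output_channels=256)
print(merge_cfg(base_roi_layer, override))
# -> {'type': 'DeformRoIPoolPack', 'output_size': 7, 'output_channels': 256}
```

Without `_delete_=True`, the leftover `sampling_ratio` key from the base RoIAlign layer would survive the merge and end up being passed to the deformable RoI pooling layer.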
-------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_roi_extractor=dict( 5 | type='SingleRoIExtractor', 6 | roi_layer=dict( 7 | _delete_=True, 8 | type='ModulatedDeformRoIPoolPack', 9 | output_size=7, 10 | output_channels=256), 11 | out_channels=256, 12 | featmap_strides=[4, 8, 16, 32]))) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch', 14 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 15 | stage_with_dcn=(False, True, True, True))) 16 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | 
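All of the dcn variants above follow one pattern: inherit the plain detector config and override only the backbone's `dcn` and `stage_with_dcn` fields, with `type='DCN'` for plain deformable convolution and `type='DCNv2'` for the modulated (mdconv) variant. The `c3-c5` part of the file names refers to ResNet's stages; a small sketch of how the boolean tuple maps onto them, assuming the usual c2..c5 naming of the four stages:

```
# stage_with_dcn has one flag per ResNet stage (conv2_x..conv5_x, a.k.a. c2..c5).
# (False, True, True, True) leaves c2 untouched and puts deformable convs in
# c3, c4 and c5 -- hence the "dconv_c3-c5" suffix in the config file names.
stage_with_dcn = (False, True, True, True)
stage_names = ('c2', 'c3', 'c4', 'c5')
dcn_stages = [name for name, flag in zip(stage_names, stage_with_dcn) if flag]
print(dcn_stages)  # ['c3', 'c4', 'c5']
```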
-------------------------------------------------------------------------------- /lvis1.0/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | roi_head=dict( 8 | bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) 9 | # runtime settings 10 | total_epochs = 15 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | type='DetectoRS_ResNet', 10 | conv_cfg=dict(type='ConvAWS'), 11 | sac=dict(type='SAC', use_deform=True), 12 | stage_with_sac=(False, True, True, True))) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/detectors/htc_r50_rfp_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | backbone=dict( 5 | type='DetectoRS_ResNet', 6 | conv_cfg=dict(type='ConvAWS'), 7 | output_img=True), 8 | neck=dict( 9 | type='RFP', 10 | rfp_steps=2, 11 | aspp_out_channels=64, 12 | aspp_dilations=(1, 3, 6, 1), 13 | rfp_backbone=dict( 14 | rfp_inplanes=256, 15 | type='DetectoRS_ResNet', 16 | depth=50, 17 | num_stages=4, 18 | out_indices=(0, 1, 2, 3), 19 | frozen_stages=1, 20 | norm_cfg=dict(type='BN', requires_grad=True), 21 | norm_eval=True, 22 | conv_cfg=dict(type='ConvAWS'), 23 | pretrained='torchvision://resnet50', 24 | style='pytorch'))) 25 | -------------------------------------------------------------------------------- /lvis1.0/configs/detectors/htc_r50_sac_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | backbone=dict( 5 | type='DetectoRS_ResNet', 6 | conv_cfg=dict(type='ConvAWS'), 7 | sac=dict(type='SAC', use_deform=True), 8 | stage_with_sac=(False, True, True, True))) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict( 6 | type='GeneralizedAttention', 7 | spatial_range=-1, 8 | num_heads=8, 9 | attention_type='0010', 10 | kv_stride=2), 11 | stages=(False, False, True, True), 12 | position='after_conv2') 13 | ])) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | plugins=[ 5 | dict( 6 | cfg=dict( 7 | type='GeneralizedAttention', 8 | spatial_range=-1, 9 | num_heads=8, 10 | attention_type='0010', 11 | kv_stride=2), 
12 | stages=(False, False, True, True), 13 | position='after_conv2') 14 | ], 15 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 16 | stage_with_dcn=(False, True, True, True))) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict( 6 | type='GeneralizedAttention', 7 | spatial_range=-1, 8 | num_heads=8, 9 | attention_type='1111', 10 | kv_stride=2), 11 | stages=(False, False, True, True), 12 | position='after_conv2') 13 | ])) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | plugins=[ 5 | dict( 6 | cfg=dict( 7 | type='GeneralizedAttention', 8 | spatial_range=-1, 9 | num_heads=8, 10 | attention_type='1111', 11 | kv_stride=2), 12 | stages=(False, False, True, True), 13 | position='after_conv2') 14 | ], 15 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 16 | stage_with_dcn=(False, True, True, True))) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/fast_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Fast R-CNN 2 | 3 | ## Introduction 4 | ``` 5 | @inproceedings{girshick2015fast, 6 | title={Fast r-cnn}, 7 | author={Girshick, Ross}, 8 | booktitle={Proceedings of the IEEE international conference on computer vision}, 9 | year={2015} 10 | } 11 | ``` 12 | 13 | ## Results and models 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- 
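The 1x / 20e / 2x / 3x suffixes used throughout these config names correspond to the epoch schedules defined in the `_base_/schedules` files and in the per-config `lr_config` overrides above; `step` lists the epochs at which the learning rate is decayed (by the default factor of 0.1). A compact summary of the common pattern, with the 1x values recalled from `_base_/schedules/schedule_1x.py`:

```
# Epoch schedules used by these configs; individual files occasionally shift a
# step epoch by one (e.g. the caffe mstrain 2x config below uses step=[16, 23]).
schedules = {
    '1x':  dict(total_epochs=12, step=[8, 11]),
    '20e': dict(total_epochs=20, step=[16, 19]),
    '2x':  dict(total_epochs=24, step=[16, 22]),
    '3x':  dict(total_epochs=36, step=[28, 34]),
}
```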
/lvis1.0/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | classes = ('person', 'bicycle', 'car') 3 | data = dict( 4 | train=dict(classes=classes), 5 | val=dict(classes=classes), 6 | test=dict(classes=classes)) 7 | # TODO: Update model url after bumping to V2.0 8 | load_from = 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth' # noqa 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | classes = ('person', ) 3 | data = dict( 4 | train=dict(classes=classes), 5 | val=dict(classes=classes), 6 | test=dict(classes=classes)) 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 
| '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | train_cfg = dict(rcnn=dict(sampler=dict(type='OHEMSampler'))) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | test_cfg = dict( 8 | rcnn=dict( 9 | score_thr=0.05, 10 | nms=dict(type='soft_nms', iou_threshold=0.5), 11 | max_per_img=100)) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- 
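The x101_32x4d and x101_64x4d variants repeat the same backbone swap everywhere: the `_base_` config keeps its heads, datasets and schedule, and only `pretrained` plus the backbone dict change. The file-name suffix encodes the ResNeXt cardinality and bottleneck width:

```
# <groups>x<base_width>d = ResNeXt cardinality x per-group width, so
# x101_32x4d is a 101-layer ResNeXt with 32 groups of width 4.
backbones = {
    'x101_32x4d': dict(type='ResNeXt', depth=101, groups=32, base_width=4),
    'x101_64x4d': dict(type='ResNeXt', depth=101, groups=64, base_width=4),
}
```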
/lvis1.0/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py'] 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(depth=101), 5 | bbox_head=dict( 6 | with_deform=True, 7 | norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) 8 | # learning policy 9 | lr_config = dict(step=[16, 22]) 10 | total_epochs = 24 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | with_deform=True, 5 | 
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) 6 | # learning policy 7 | lr_config = dict(step=[16, 22]) 8 | total_epochs = 24 9 | optimizer_config = dict( 10 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 
4 | -------------------------------------------------------------------------------- /lvis1.0/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | _delete_=True, 5 | type='FreeAnchorRetinaHead', 6 | num_classes=80, 7 | in_channels=256, 8 | stacked_convs=4, 9 | feat_channels=256, 10 | anchor_generator=dict( 11 | type='AnchorGenerator', 12 | octave_base_scale=4, 13 | scales_per_octave=3, 14 | ratios=[0.5, 1.0, 2.0], 15 | strides=[8, 16, 32, 64, 128]), 16 | bbox_coder=dict( 17 | type='DeltaXYWHBBoxCoder', 18 | target_means=[.0, .0, .0, .0], 19 | target_stds=[0.1, 0.1, 0.2, 0.2]), 20 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75))) 21 | optimizer_config = dict( 22 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 23 | -------------------------------------------------------------------------------- /lvis1.0/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/fsaf/fsaf_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fsaf_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fsaf_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | 
backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. / 16), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. 
/ 4), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. / 16), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict(plugins=[ 4 | dict( 5 | cfg=dict(type='ContextBlock', ratio=1. / 4), 6 | stages=(False, True, True, True), 7 | position='after_conv3') 8 | ])) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. 
/ 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. / 16), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), 5 | norm_eval=False, 6 | plugins=[ 7 | dict( 8 | cfg=dict(type='ContextBlock', ratio=1. 
/ 4), 9 | stages=(False, True, True, True), 10 | position='after_conv3') 11 | ])) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | type='ResNet', 6 | depth=101, 7 | num_stages=4, 8 | out_indices=(0, 1, 2, 3), 9 | frozen_stages=1, 10 | norm_cfg=dict(type='BN', requires_grad=True), 11 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 12 | stage_with_dcn=(False, True, True, True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | -------------------------------------------------------------------------------- /lvis1.0/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | type='ResNet', 6 | depth=101, 7 | num_stages=4, 8 | out_indices=(0, 1, 2, 3), 9 | frozen_stages=1, 10 | norm_cfg=dict(type='BN', requires_grad=True), 11 | norm_eval=True, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | # multi-scale training 6 | img_norm_cfg = dict( 7 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 8 | train_pipeline = [ 9 | dict(type='LoadImageFromFile'), 10 | dict(type='LoadAnnotations', with_bbox=True), 11 | dict( 12 | type='Resize', 13 | img_scale=[(1333, 480), (1333, 800)], 14 | multiscale_mode='range', 15 | keep_ratio=True), 16 | dict(type='RandomFlip', flip_ratio=0.5), 17 | dict(type='Normalize', **img_norm_cfg), 18 | dict(type='Pad', size_divisor=32), 19 | dict(type='DefaultFormatBundle'), 20 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 21 | ] 22 | data = dict(train=dict(pipeline=train_pipeline)) 23 | -------------------------------------------------------------------------------- /lvis1.0/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | type='GFL', 4 | pretrained='open-mmlab://resnext101_32x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=32, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | frozen_stages=1, 13 | norm_cfg=dict(type='BN', requires_grad=True), 14 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 15 | stage_with_dcn=(False, False, True, True), 16 | norm_eval=True, 17 | style='pytorch')) 18 | -------------------------------------------------------------------------------- /lvis1.0/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './gfl_r50_fpn_mstrain_2x_coco.py' 2 | model = dict( 3 | type='GFL', 4 | pretrained='open-mmlab://resnext101_32x4d', 5 | backbone=dict( 6 | type='ResNeXt', 7 | depth=101, 8 | groups=32, 9 | base_width=4, 10 | num_stages=4, 11 | out_indices=(0, 1, 2, 3), 12 | 
frozen_stages=1, 13 | norm_cfg=dict(type='BN', requires_grad=True), 14 | norm_eval=True, 15 | style='pytorch')) 16 | -------------------------------------------------------------------------------- /lvis1.0/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | bbox_head=dict( 4 | loss_cls=dict( 5 | _delete_=True, 6 | type='GHMC', 7 | bins=30, 8 | momentum=0.75, 9 | use_sigmoid=True, 10 | loss_weight=1.0), 11 | loss_bbox=dict( 12 | _delete_=True, 13 | type='GHMR', 14 | mu=0.02, 15 | bins=10, 16 | momentum=0.7, 17 | loss_weight=10.0))) 18 | optimizer_config = dict( 19 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 20 | -------------------------------------------------------------------------------- /lvis1.0/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnet50_gn_ws', 6 | backbone=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 7 | neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 8 | roi_head=dict( 9 | bbox_head=dict( 10 | type='Shared4Conv1FCBBoxHead', 11 | conv_out_channels=256, 12 | conv_cfg=conv_cfg, 13 | norm_cfg=norm_cfg))) 14 | -------------------------------------------------------------------------------- 
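The gn+ws configs swap every convolution for a weight-standardized one (`conv_cfg=dict(type='ConvWS')`) and every BatchNorm for 32-group GroupNorm, in the backbone, the FPN neck and the Shared4Conv1FCBBoxHead alike. A rough sketch of what weight standardization does to a convolution, in plain PyTorch (the real layer is mmcv's ConvWS implementation; this only illustrates the idea):

```
import torch
import torch.nn.functional as F

def conv_ws_2d(x, weight, bias=None, stride=1, padding=0, eps=1e-5):
    # Standardize each output filter to zero mean / unit std before convolving;
    # this is the "WS" that ConvWS adds on top of an ordinary convolution.
    c_out = weight.size(0)
    flat = weight.view(c_out, -1)
    mean = flat.mean(dim=1).view(c_out, 1, 1, 1)
    std = flat.std(dim=1).view(c_out, 1, 1, 1)
    return F.conv2d(x, (weight - mean) / (std + eps), bias, stride, padding)

x = torch.randn(1, 3, 8, 8)
w = torch.randn(16, 3, 3, 3)
print(conv_ws_2d(x, w, padding=1).shape)  # torch.Size([1, 16, 8, 8])
```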
/lvis1.0/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', 6 | backbone=dict( 7 | type='ResNeXt', 8 | depth=101, 9 | groups=32, 10 | base_width=4, 11 | num_stages=4, 12 | out_indices=(0, 1, 2, 3), 13 | frozen_stages=1, 14 | style='pytorch', 15 | conv_cfg=conv_cfg, 16 | norm_cfg=norm_cfg)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws', 6 | backbone=dict( 7 | type='ResNeXt', 8 | depth=50, 9 | groups=32, 10 | base_width=4, 11 | num_stages=4, 12 | out_indices=(0, 1, 2, 3), 13 | frozen_stages=1, 14 | style='pytorch', 15 | conv_cfg=conv_cfg, 16 | norm_cfg=norm_cfg)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | conv_cfg = dict(type='ConvWS') 3 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 4 | model = dict( 5 | pretrained='open-mmlab://jhu/resnet50_gn_ws', 6 | backbone=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 7 | neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), 8 | roi_head=dict( 9 | bbox_head=dict( 10 | type='Shared4Conv1FCBBoxHead', 11 | conv_out_channels=256, 12 | conv_cfg=conv_cfg, 13 | norm_cfg=norm_cfg), 14 | mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) 15 | # learning policy 16 | lr_config = dict(step=[16, 22]) 17 | total_epochs = 24 18 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 
| _base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # model settings 3 | conv_cfg = dict(type='ConvWS') 4 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 5 | model = dict( 6 | pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws', 7 | backbone=dict( 8 | type='ResNeXt', 9 | depth=101, 10 | groups=32, 11 | base_width=4, 12 | num_stages=4, 13 | out_indices=(0, 1, 2, 3), 14 | frozen_stages=1, 15 | style='pytorch', 16 | conv_cfg=conv_cfg, 17 | norm_cfg=norm_cfg)) 18 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # model settings 3 | conv_cfg = dict(type='ConvWS') 4 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 5 | model = dict( 6 | pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws', 7 | backbone=dict( 8 | type='ResNeXt', 9 | depth=50, 10 | groups=32, 11 | base_width=4, 12 | num_stages=4, 13 | out_indices=(0, 1, 2, 3), 14 | frozen_stages=1, 15 | style='pytorch', 16 | conv_cfg=conv_cfg, 17 | norm_cfg=norm_cfg)) 18 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_gn', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 3 | model = dict( 4 | pretrained='open-mmlab://contrib/resnet50_gn', 5 | backbone=dict(norm_cfg=norm_cfg), 6 | 
neck=dict(norm_cfg=norm_cfg), 7 | roi_head=dict( 8 | bbox_head=dict( 9 | type='Shared4Conv1FCBBoxHead', 10 | conv_out_channels=256, 11 | norm_cfg=norm_cfg), 12 | mask_head=dict(norm_cfg=norm_cfg))) 13 | # learning policy 14 | lr_config = dict(step=[16, 22]) 15 | total_epochs = 24 16 | -------------------------------------------------------------------------------- /lvis1.0/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = ['../grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py'] 2 | # learning policy 3 | lr_config = dict( 4 | policy='step', 5 | warmup='linear', 6 | warmup_iters=500, 7 | warmup_ratio=0.001, 8 | step=[8, 11]) 9 | checkpoint_config = dict(interval=1) 10 | # runtime settings 11 | total_epochs = 12 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | # optimizer 14 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 15 | optimizer_config = dict(grad_clip=None) 16 | # learning policy 17 | lr_config = dict( 18 | policy='step', 19 | warmup='linear', 20 | warmup_iters=3665, 21 | warmup_ratio=1.0 / 80, 22 | step=[17, 23]) 23 | total_epochs = 25 24 | -------------------------------------------------------------------------------- /lvis1.0/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | style='pytorch')) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://detectron2/resnet101_caffe', 5 | backbone=dict(depth=101)) 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | 
style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w40', 5 | backbone=dict( 6 | type='HRNet', 7 | extra=dict( 8 | stage2=dict(num_channels=(40, 80)), 9 | stage3=dict(num_channels=(40, 80, 160)), 10 | stage4=dict(num_channels=(40, 80, 160, 320)))), 11 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w40', 5 | backbone=dict( 6 | type='HRNet', 7 | extra=dict( 8 | stage2=dict(num_channels=(40, 80)), 9 | stage3=dict(num_channels=(40, 80, 160)), 10 | stage4=dict(num_channels=(40, 80, 160, 320)))), 11 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 12 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://msra/hrnetv2_w18', 5 | backbone=dict( 6 | extra=dict( 7 | 
stage2=dict(num_channels=(18, 36)), 8 | stage3=dict(num_channels=(18, 36, 72)), 9 | stage4=dict(num_channels=(18, 36, 72, 144)))), 10 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | 
-------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w32_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w32_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w40_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w18', 4 | backbone=dict( 5 | extra=dict( 6 | stage2=dict(num_channels=(18, 36)), 7 | stage3=dict(num_channels=(18, 36, 72)), 8 | stage4=dict(num_channels=(18, 36, 72, 144)))), 9 | neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) 10 | 
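The `_2x_`, `_3x_`, `20e`, and `28e` variants above differ from their bases only in the learning-rate schedule: `lr_config = dict(step=[...])` marks the epochs at which the rate is dropped and `total_epochs` sets the run length. As a rough, self-contained sketch of how such a step schedule plays out (assuming the usual decay factor of 0.1; this is an illustration, not the framework's actual LR hook):

    # Hypothetical illustration of a step LR schedule such as
    # lr_config = dict(step=[16, 22]) with total_epochs = 24 (the "2x" schedule).
    # Assumes a decay factor gamma = 0.1; a sketch, not mmdetection's hook.

    def step_lr(base_lr, steps, total_epochs, gamma=0.1):
        """Return the learning rate used during each (0-indexed) epoch."""
        lrs = []
        for epoch in range(total_epochs):
            num_drops = sum(1 for s in steps if epoch >= s)
            lrs.append(base_lr * gamma ** num_drops)
        return lrs

    if __name__ == "__main__":
        for epoch, lr in enumerate(step_lr(0.02, steps=[16, 22], total_epochs=24)):
            print(f"epoch {epoch:2d}: lr = {lr:g}")

With these numbers the rate stays at 0.02 for epochs 0-15, drops to 0.002 for epochs 16-21, and to 0.0002 for the final two epochs.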
-------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://msra/hrnetv2_w40', 4 | backbone=dict( 5 | type='HRNet', 6 | extra=dict( 7 | stage2=dict(num_channels=(40, 80)), 8 | stage3=dict(num_channels=(40, 80, 160)), 9 | stage4=dict(num_channels=(40, 80, 160, 320)))), 10 | neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) 11 | -------------------------------------------------------------------------------- /lvis1.0/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_r101_fpn_20e_causal.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_without_semantic_r50_fpn_1x_lvis.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | # learning policy 4 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 5 | lr_config = dict(step=[16, 19]) 6 | total_epochs = 20 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | # learning policy 4 | lr_config = dict(step=[16, 19]) 5 | total_epochs = 20 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | total_epochs = 20 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | data = dict(samples_per_gpu=1, 
workers_per_gpu=1) 16 | # learning policy 17 | lr_config = dict(step=[16, 19]) 18 | total_epochs = 20 19 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | data = dict(samples_per_gpu=1, workers_per_gpu=1) 16 | # learning policy 17 | lr_config = dict(step=[16, 19]) 18 | total_epochs = 20 19 | -------------------------------------------------------------------------------- /lvis1.0/configs/htc/htc_x101_64x4d_fpn_20e_16gpu_causal.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_without_semantic_r50_fpn_1x_lvis.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch')) 15 | data = dict(samples_per_gpu=1, workers_per_gpu=1) 16 | # learning policy 17 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 18 | lr_config = dict(step=[16, 19]) 19 | total_epochs = 20 20 | -------------------------------------------------------------------------------- /lvis1.0/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | 
norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | bbox_head=dict( 8 | type='RetinaHead', 9 | anchor_generator=dict( 10 | type='LegacyAnchorGenerator', 11 | center_offset=0.5, 12 | octave_base_scale=4, 13 | scales_per_octave=3, 14 | ratios=[0.5, 1.0, 2.0], 15 | strides=[8, 16, 32, 64, 128]), 16 | bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), 17 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 18 | -------------------------------------------------------------------------------- /lvis1.0/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | neck=[ 5 | dict( 6 | type='FPN', 7 | in_channels=[256, 512, 1024, 2048], 8 | out_channels=256, 9 | start_level=1, 10 | add_extra_convs='on_input', 11 | num_outs=5), 12 | dict( 13 | type='BFP', 14 | in_channels=256, 15 | num_levels=5, 16 | refine_level=1, 17 | refine_type='non_local') 18 | ], 19 | bbox_head=dict( 20 | loss_bbox=dict( 21 | _delete_=True, 22 | type='BalancedL1Loss', 23 | alpha=0.5, 24 | gamma=1.5, 25 | beta=0.11, 26 | loss_weight=1.0))) 27 | -------------------------------------------------------------------------------- /lvis1.0/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- 
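Nearly every file in this tree is a small delta on a `_base_` config: child keys are merged recursively into the inherited dict, and a nested dict carrying `_delete_=True` (as in the `loss_bbox` override of `libra_retinanet_r50_fpn_1x_coco.py` above) replaces the base entry outright instead of merging with it. A minimal, dependency-free sketch of that merge rule (an approximation of mmcv's config merging, not the real implementation, which also handles lists and multiple bases):

    # Simplified sketch of the recursive merge behind `_base_` inheritance.
    def merge_cfg(base: dict, child: dict) -> dict:
        merged = dict(base)
        for key, value in child.items():
            if isinstance(value, dict) and value.get('_delete_', False):
                # _delete_=True: ignore the base entry and keep only the child's keys
                merged[key] = {k: v for k, v in value.items() if k != '_delete_'}
            elif isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = merge_cfg(merged[key], value)  # recurse into nested dicts
            else:
                merged[key] = value  # plain override
        return merged

    if __name__ == "__main__":
        # Toy example mirroring the libra_retinanet loss override above.
        base = dict(bbox_head=dict(loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
        child = dict(bbox_head=dict(loss_bbox=dict(
            _delete_=True, type='BalancedL1Loss', alpha=0.5, gamma=1.5)))
        print(merge_cfg(base, child))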
/lvis1.0/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- 
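Because the X101 variants above only swap the `pretrained` checkpoint and the `backbone` block relative to their R50 base, the quickest way to see the full merged settings of any of these files is to load it with mmcv's `Config` (a sketch assuming mmcv is installed and the working directory is `lvis1.0`; the printed fields are illustrative):

    # Load a config and inspect values merged in from its _base_ chain.
    from mmcv import Config

    cfg = Config.fromfile(
        'configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py')

    print(cfg.model.pretrained)       # open-mmlab://resnext101_64x4d (set in this file)
    print(cfg.model.backbone.type)    # ResNeXt; depth/groups overridden here
    print(cfg.data.train)             # dataset settings inherited from the base config
    print(cfg.optimizer, cfg.lr_config)  # schedule inherited from the r50 base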
/lvis1.0/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py' 2 | model = dict( 3 | 
pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | type='MaskScoringRCNN', 4 | roi_head=dict( 5 | type='MaskScoringRoIHead', 6 | mask_iou_head=dict( 7 | type='MaskIoUHead', 8 | num_convs=4, 9 | num_fcs=2, 10 | roi_feat_size=14, 11 | in_channels=256, 12 | conv_out_channels=256, 13 | fc_out_channels=1024, 14 | num_classes=80))) 15 | # model training and testing settings 16 | train_cfg = dict(rcnn=dict(mask_thr_binary=0.5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | type='MaskScoringRCNN', 4 | roi_head=dict( 5 | type='MaskScoringRoIHead', 6 | mask_iou_head=dict( 7 | type='MaskIoUHead', 8 | num_convs=4, 9 | num_fcs=2, 10 | roi_feat_size=14, 11 | in_channels=256, 12 | conv_out_channels=256, 13 | fc_out_channels=1024, 14 | num_classes=80))) 15 | # model training and testing settings 16 | train_cfg = dict(rcnn=dict(mask_thr_binary=0.5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'./ms_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/paa/paa_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './paa_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/paa/paa_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './paa_r101_fpn_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/paa/paa_r50_fpn_1.5x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './paa_r50_fpn_1x_coco.py' 2 | lr_config = dict(step=[12, 16]) 3 | total_epochs = 18 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/paa/paa_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './paa_r50_fpn_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | neck=dict( 5 | type='PAFPN', 6 | in_channels=[256, 512, 1024, 2048], 7 | out_channels=256, 8 | num_outs=5)) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | optimizer_config = dict(grad_clip=None) 9 | # learning policy 10 | # actual epoch = 3 * 3 = 9 11 | lr_config = dict(policy='step', step=[3]) 12 | # runtime settings 13 | total_epochs = 4 # actual epoch = 4 * 3 = 12 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = 
dict(bbox_head=dict(num_classes=20)) 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | optimizer_config = dict(grad_clip=None) 9 | # learning policy 10 | # actual epoch = 3 * 3 = 9 11 | lr_config = dict(policy='step', step=[3]) 12 | # runtime settings 13 | total_epochs = 4 # actual epoch = 4 * 3 = 12 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | bbox_head=dict( 5 | type='PISARetinaHead', 6 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 7 | 8 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | bbox_head=dict( 5 | type='PISARetinaHead', 6 | loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) 7 | 8 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/pisa/pisa_ssd300_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd300_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/pisa/pisa_ssd512_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd512_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /lvis1.0/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_12gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_12gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | 
norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[224, 448, 896, 2240], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_3.2gf', 4 | backbone=dict( 5 | dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), 6 | stage_with_dcn=(False, True, True, True))) 7 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_4.0gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_4.0gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[80, 240, 560, 1360], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_6.4gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_6.4gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[168, 392, 784, 1624], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_8.0gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_8.0gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[80, 240, 720, 1920], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_1.6gf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_1.6gf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[72, 168, 408, 912], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://regnetx_800mf', 4 | backbone=dict( 5 | type='RegNet', 6 | arch='regnetx_800mf', 7 | out_indices=(0, 1, 2, 3), 8 | frozen_stages=1, 9 | norm_cfg=dict(type='BN', requires_grad=True), 10 | norm_eval=True, 11 | style='pytorch'), 12 | neck=dict( 13 | type='FPN', 14 | in_channels=[64, 128, 288, 672], 15 | out_channels=256, 16 | num_outs=5)) 17 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) 3 | # training and testing settings 4 | train_cfg = dict( 5 | init=dict( 6 | assigner=dict( 7 | _delete_=True, 8 | type='MaxIoUAssigner', 9 | pos_iou_thr=0.5, 10 | neg_iou_thr=0.4, 11 | min_pos_iou=0, 12 | ignore_iof_thr=-1))) 13 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/configs/reppoints/reppoints.png -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax')) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict( 5 | depth=101, 6 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 7 | stage_with_dcn=(False, True, True, True))) 8 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_1x_coco.py' 2 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 3 | model = 
dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) 4 | optimizer = dict(lr=0.01) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch', 14 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 15 | stage_with_dcn=(False, True, True, True))) 16 | -------------------------------------------------------------------------------- /lvis1.0/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='partial_minmax')) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/res2net/htc_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | # learning policy 6 | lr_config = dict(step=[16, 19]) 7 | total_epochs = 20 8 | -------------------------------------------------------------------------------- /lvis1.0/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | 
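# --- Editor's note: illustrative usage sketch, not a file from the original repository. ---
# The RetinaNet variants above only override `pretrained` and `backbone` relative to the
# file named in `_base_`; mmcv merges the dictionaries when the config is loaded. Assuming
# mmcv is installed and the repository root is the working directory, the merged result can
# be inspected with the same pattern that tools/print_config.py uses later in this dump:
from mmcv import Config

cfg = Config.fromfile(
    'lvis1.0/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py')
print(cfg.model.backbone.type)  # 'ResNeXt' -- overridden by this config
print(cfg.model.neck.type)      # 'FPN' -- inherited through the _base_ chain
print(cfg.pretty_text)          # dump of the fully merged config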
-------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py', 3 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 4 | ] 5 | img_norm_cfg = dict( 6 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 7 | train_pipeline = [ 8 | dict(type='LoadImageFromFile'), 9 | dict(type='LoadAnnotations', with_bbox=True, with_label=False), 10 | dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), 11 | dict(type='RandomFlip', flip_ratio=0.5), 12 | dict(type='Normalize', **img_norm_cfg), 13 | dict(type='Pad', 
size_divisor=32), 14 | dict(type='DefaultFormatBundle'), 15 | dict(type='Collect', keys=['img', 'gt_bboxes']), 16 | ] 17 | data = dict(train=dict(pipeline=train_pipeline)) 18 | evaluation = dict(interval=1, metric='proposal_fast') 19 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /lvis1.0/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 7 | model = dict( 8 | pretrained=None, 9 | backbone=dict( 10 | frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg), 11 | neck=dict(norm_cfg=norm_cfg), 12 | roi_head=dict( 13 | bbox_head=dict( 14 | type='Shared4Conv1FCBBoxHead', 15 | conv_out_channels=256, 16 
| norm_cfg=norm_cfg))) 17 | # optimizer 18 | optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) 19 | optimizer_config = dict(_delete_=True, grad_clip=None) 20 | # learning policy 21 | lr_config = dict(warmup_ratio=0.1, step=[65, 71]) 22 | total_epochs = 73 23 | -------------------------------------------------------------------------------- /lvis1.0/configs/wider_face/ssd300_wider_face.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', 3 | '../_base_/default_runtime.py' 4 | ] 5 | model = dict(bbox_head=dict(num_classes=1)) 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) 8 | optimizer_config = dict() 9 | # learning policy 10 | lr_config = dict( 11 | policy='step', 12 | warmup='linear', 13 | warmup_iters=1000, 14 | warmup_ratio=0.001, 15 | step=[16, 20]) 16 | # runtime settings 17 | total_epochs = 24 18 | log_config = dict(interval=1) 19 | -------------------------------------------------------------------------------- /lvis1.0/demo/demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/demo/demo.jpg -------------------------------------------------------------------------------- /lvis1.0/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTORCH="1.5" 2 | ARG CUDA="10.1" 3 | ARG CUDNN="7" 4 | 5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel 6 | 7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" 8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" 9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" 10 | 11 | RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ 12 | && apt-get clean \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | # Install MMCV 16 | RUN pip install mmcv-full==latest+torch1.5.0+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html 17 | 18 | # Install MMDetection 19 | RUN conda clean --all 20 | RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection 21 | WORKDIR /mmdetection 22 | ENV FORCE_CUDA="1" 23 | RUN pip install -r requirements/build.txt 24 | RUN pip install --no-cache-dir -e . 25 | -------------------------------------------------------------------------------- /lvis1.0/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /lvis1.0/docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to MMDetection's documentation! 
2 | ======================================= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | install.md 8 | getting_started.md 9 | model_zoo.md 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | :caption: Notes 14 | 15 | config.md 16 | compatibility.md 17 | changelog.md 18 | projects.md 19 | trouble_shooting.md 20 | 21 | .. toctree:: 22 | :maxdepth: 2 23 | :caption: Tutorials 24 | 25 | tutorials/index.rst 26 | 27 | .. toctree:: 28 | :caption: API Reference 29 | 30 | api.rst 31 | 32 | 33 | Indices and tables 34 | ================== 35 | 36 | * :ref:`genindex` 37 | * :ref:`search` 38 | -------------------------------------------------------------------------------- /lvis1.0/docs/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | .. toctree:: 2 | :maxdepth: 2 3 | 4 | finetune.md 5 | new_dataset.md 6 | data_pipeline.md 7 | new_modules.md 8 | -------------------------------------------------------------------------------- /lvis1.0/lvis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/lvis.png -------------------------------------------------------------------------------- /lvis1.0/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result_pyplot) 3 | from .test import multi_gpu_test, single_gpu_test 4 | from .train import get_root_logger, set_random_seed, train_detector 5 | 6 | __all__ = [ 7 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 8 | 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 9 | 'multi_gpu_test', 'single_gpu_test' 10 | ] 11 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .evaluation import * # noqa: F401, F403 4 | from .fp16 import * # noqa: F401, F403 5 | from .mask import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, 2 | YOLOAnchorGenerator) 3 | from .builder import ANCHOR_GENERATORS, build_anchor_generator 4 | from .point_generator import PointGenerator 5 | from .utils import anchor_inside_flags, calc_region, images_to_levels 6 | 7 | __all__ = [ 8 | 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 9 | 'PointGenerator', 'images_to_levels', 'calc_region', 10 | 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator' 11 | ] 12 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | ANCHOR_GENERATORS = Registry('Anchor generator') 4 | 5 | 6 | def build_anchor_generator(cfg, default_args=None): 7 | return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args) 
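# --- Editor's note: illustrative usage sketch, not a file from the original repository. ---
# The Registry / build_from_cfg pair above turns a config dict whose 'type' key names a
# registered class into an instance of that class. Assuming mmdet is importable, building an
# anchor generator from a RetinaNet-style config could look like the snippet below; the
# scale/ratio/stride values are example numbers taken from the standard RetinaNet configs,
# not something mandated by this builder module:
from mmdet.core.anchor import build_anchor_generator

anchor_generator = build_anchor_generator(
    dict(
        type='AnchorGenerator',
        octave_base_scale=4,
        scales_per_octave=3,
        ratios=[0.5, 1.0, 2.0],
        strides=[8, 16, 32, 64, 128]))
print(anchor_generator.num_base_anchors)  # anchors per location at each pyramid level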
8 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 2 | from .assign_result import AssignResult 3 | from .atss_assigner import ATSSAssigner 4 | from .base_assigner import BaseAssigner 5 | from .center_region_assigner import CenterRegionAssigner 6 | from .grid_assigner import GridAssigner 7 | from .max_iou_assigner import MaxIoUAssigner 8 | from .point_assigner import PointAssigner 9 | 10 | __all__ = [ 11 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 12 | 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner' 13 | ] 14 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | """Base assigner that assigns boxes to ground truth boxes.""" 6 | 7 | @abstractmethod 8 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 9 | """Assign each box to either a ground truth box or a negative sample.""" 10 | pass 11 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | BBOX_ASSIGNERS = Registry('bbox_assigner') 4 | BBOX_SAMPLERS = Registry('bbox_sampler') 5 | BBOX_CODERS = Registry('bbox_coder') 6 | 7 | 8 | def build_assigner(cfg, **default_args): 9 | """Builder of box assigner.""" 10 | return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) 11 | 12 | 13 | def build_sampler(cfg, **default_args): 14 | """Builder of box sampler.""" 15 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) 16 | 17 | 18 | def build_bbox_coder(cfg, **default_args): 19 | """Builder of box coder.""" 20 | return build_from_cfg(cfg, BBOX_CODERS, default_args) 21 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/coder/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_bbox_coder import BaseBBoxCoder 2 | from .bucketing_bbox_coder import BucketingBBoxCoder 3 | from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder 4 | from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder 5 | from .pseudo_bbox_coder import PseudoBBoxCoder 6 | from .tblr_bbox_coder import TBLRBBoxCoder 7 | from .yolo_bbox_coder import YOLOBBoxCoder 8 | 9 | __all__ = [ 10 | 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 11 | 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', 12 | 'BucketingBBoxCoder' 13 | ] 14 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/coder/base_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseBBoxCoder(metaclass=ABCMeta): 5 | """Base bounding box coder.""" 6 | 7 | def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def encode(self, bboxes, gt_bboxes): 12 | """Encode deltas between bboxes and ground truth boxes.""" 13 | pass 14 | 15 | @abstractmethod 16 | def
decode(self, bboxes, bboxes_pred): 17 | """Decode the predicted bboxes according to prediction and base 18 | boxes.""" 19 | pass 20 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/coder/pseudo_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from ..builder import BBOX_CODERS 2 | from .base_bbox_coder import BaseBBoxCoder 3 | 4 | 5 | @BBOX_CODERS.register_module() 6 | class PseudoBBoxCoder(BaseBBoxCoder): 7 | """Pseudo bounding box coder.""" 8 | 9 | def __init__(self, **kwargs): 10 | super(BaseBBoxCoder, self).__init__(**kwargs) 11 | 12 | def encode(self, bboxes, gt_bboxes): 13 | """torch.Tensor: return the given ``bboxes``""" 14 | return gt_bboxes 15 | 16 | def decode(self, bboxes, pred_bboxes): 17 | """torch.Tensor: return the given ``pred_bboxes``""" 18 | return pred_bboxes 19 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_iou_calculator 2 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps 3 | 4 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] 5 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | IOU_CALCULATORS = Registry('IoU calculator') 4 | 5 | 6 | def build_iou_calculator(cfg, default_args=None): 7 | """Builder of IoU calculator.""" 8 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 9 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .combined_sampler import CombinedSampler 3 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 4 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 5 | from .ohem_sampler import OHEMSampler 6 | from .pseudo_sampler import PseudoSampler 7 | from .random_sampler import RandomSampler 8 | from .sampling_result import SamplingResult 9 | from .score_hlr_sampler import ScoreHLRSampler 10 | 11 | __all__ = [ 12 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 13 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 14 | 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler' 15 | ] 16 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from ..builder import BBOX_SAMPLERS, build_sampler 2 | from .base_sampler import BaseSampler 3 | 4 | 5 | @BBOX_SAMPLERS.register_module() 6 | class CombinedSampler(BaseSampler): 7 | """A sampler that combines positive sampler and negative sampler.""" 8 | 9 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 10 | super(CombinedSampler, self).__init__(**kwargs) 11 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 12 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 13 | 14 | def _sample_pos(self, **kwargs): 15 | """Sample positive samples.""" 16 | raise NotImplementedError 17 | 18 | def _sample_neg(self, **kwargs): 
19 | """Sample negative samples.""" 20 | raise NotImplementedError 21 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, 2 | get_classes, imagenet_det_classes, 3 | imagenet_vid_classes, voc_classes) 4 | from .eval_hooks import DistEvalHook, EvalHook 5 | from .mean_ap import average_precision, eval_map, print_map_summary 6 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 7 | print_recall_summary) 8 | 9 | __all__ = [ 10 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 11 | 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 12 | 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 13 | 'print_map_summary', 'eval_recalls', 'print_recall_summary', 14 | 'plot_num_recall', 'plot_iou_recall' 15 | ] 16 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/fp16/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import auto_fp16, force_fp32 2 | from .hooks import Fp16OptimizerHook, wrap_fp16_model 3 | 4 | __all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] 5 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_target import mask_target 2 | from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks 3 | from .utils import encode_mask_results, split_combined_polys 4 | 5 | __all__ = [ 6 | 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 7 | 'PolygonMasks', 'encode_mask_results' 8 | ] 9 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import multiclass_nms 2 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 3 | merge_aug_proposals, merge_aug_scores) 4 | 5 | __all__ = [ 6 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 7 | 'merge_aug_scores', 'merge_aug_masks' 8 | ] 9 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads 2 | from .misc import multi_apply, tensor2imgs, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'multi_apply', 6 | 'unmap' 7 | ] 8 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/datasets/deepfashion.py: -------------------------------------------------------------------------------- 1 | from .builder import DATASETS 2 | from .coco import CocoDataset 3 | 4 | 5 | @DATASETS.register_module() 6 | class DeepFashionDataset(CocoDataset): 7 | 8 | CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 9 | 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 10 | 'skin', 'face') 11 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/datasets/samplers/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .distributed_sampler import DistributedSampler 2 | from .group_sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler'] 5 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 3 | ROI_EXTRACTORS, SHARED_HEADS, build_backbone, 4 | build_detector, build_head, build_loss, build_neck, 5 | build_roi_extractor, build_shared_head) 6 | from .dense_heads import * # noqa: F401,F403 7 | from .detectors import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .roi_heads import * # noqa: F401,F403 11 | 12 | __all__ = [ 13 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 14 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 15 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 16 | ] 17 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .darknet import Darknet 2 | from .detectors_resnet import DetectoRS_ResNet 3 | from .detectors_resnext import DetectoRS_ResNeXt 4 | from .hourglass import HourglassNet 5 | from .hrnet import HRNet 6 | from .regnet import RegNet 7 | from .res2net import Res2Net 8 | from .resnet import ResNet, ResNetV1d 9 | from .resnext import ResNeXt 10 | from .ssd_vgg import SSDVGG 11 | 12 | __all__ = [ 13 | 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', 14 | 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet' 15 | ] 16 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/atss.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class ATSS(SingleStageDetector): 7 | """Implementation of `ATSS `_.""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FasterRCNN(TwoStageDetector): 7 | """Implementation of `Faster R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(FasterRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/fcos.py: 
-------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FCOS(SingleStageDetector): 7 | """Implementation of `FCOS `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/fovea.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FOVEA(SingleStageDetector): 7 | """Implementation of `FoveaBox `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/fsaf.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FSAF(SingleStageDetector): 7 | """Implementation of `FSAF `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/gfl.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GFL(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/htc.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .cascade_rcnn import CascadeRCNN 3 | 4 | 5 | @DETECTORS.register_module() 6 | class HybridTaskCascade(CascadeRCNN): 7 | """Implementation of `HTC `_""" 8 | 9 | def __init__(self, **kwargs): 10 | super(HybridTaskCascade, self).__init__(**kwargs) 11 | 12 | @property 13 | def with_semantic(self): 14 | """bool: whether the detector has a semantic head""" 15 | return self.roi_head.with_semantic 16 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskRCNN(TwoStageDetector): 7 | """Implementation of `Mask R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | 
test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(MaskRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/mask_scoring_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskScoringRCNN(TwoStageDetector): 7 | """Mask Scoring RCNN. 8 | 9 | https://arxiv.org/abs/1903.00241 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | rpn_head, 15 | roi_head, 16 | train_cfg, 17 | test_cfg, 18 | neck=None, 19 | pretrained=None): 20 | super(MaskScoringRCNN, self).__init__( 21 | backbone=backbone, 22 | neck=neck, 23 | rpn_head=rpn_head, 24 | roi_head=roi_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | pretrained=pretrained) 28 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/nasfcos.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class NASFCOS(SingleStageDetector): 7 | """NAS-FCOS: Fast Neural Architecture Search for Object Detection. 8 | 9 | https://arxiv.org/abs/1906.0442 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | neck, 15 | bbox_head, 16 | train_cfg=None, 17 | test_cfg=None, 18 | pretrained=None): 19 | super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 20 | test_cfg, pretrained) 21 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/paa.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class PAA(SingleStageDetector): 7 | """Implementation of `PAA `_.""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/retinanet.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RetinaNet(SingleStageDetector): 7 | """Implementation of `RetinaNet `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/detectors/yolo.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 Western Digital Corporation or its affiliates. 
2 | 3 | from ..builder import DETECTORS 4 | from .single_stage import SingleStageDetector 5 | 6 | 7 | @DETECTORS.register_module() 8 | class YOLOV3(SingleStageDetector): 9 | 10 | def __init__(self, 11 | backbone, 12 | neck, 13 | bbox_head, 14 | train_cfg=None, 15 | test_cfg=None, 16 | pretrained=None): 17 | super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, 18 | test_cfg, pretrained) 19 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .bfp import BFP 2 | from .fpn import FPN 3 | from .fpn_carafe import FPN_CARAFE 4 | from .hrfpn import HRFPN 5 | from .nas_fpn import NASFPN 6 | from .nasfcos_fpn import NASFCOS_FPN 7 | from .pafpn import PAFPN 8 | from .rfp import RFP 9 | from .yolo_neck import YOLOV3Neck 10 | 11 | __all__ = [ 12 | 'FPN', 'BFP', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', 'NASFCOS_FPN', 13 | 'RFP', 'YOLOV3Neck' 14 | ] 15 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, 3 | Shared4Conv1FCBBoxHead) 4 | from .double_bbox_head import DoubleConvFCBBoxHead 5 | from .sabl_head import SABLHead 6 | 7 | __all__ = [ 8 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 9 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead' 10 | ] 11 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/roi_heads/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .coarse_mask_head import CoarseMaskHead 2 | from .fcn_mask_head import FCNMaskHead 3 | from .fused_semantic_head import FusedSemanticHead 4 | from .grid_head import GridHead 5 | from .htc_mask_head import HTCMaskHead 6 | from .mask_point_head import MaskPointHead 7 | from .maskiou_head import MaskIoUHead 8 | 9 | __all__ = [ 10 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 11 | 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead' 12 | ] 13 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .generic_roi_extractor import GenericRoIExtractor 2 | from .single_level_roi_extractor import SingleRoIExtractor 3 | 4 | __all__ = [ 5 | 'SingleRoIExtractor', 6 | 'GenericRoIExtractor', 7 | ] 8 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/roi_heads/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .gaussian_target import gaussian_radius, gen_gaussian_target 2 | from .res_layer import ResLayer 3 | 4 | __all__ = ['ResLayer', 'gaussian_radius', 'gen_gaussian_target'] 5 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger 3 | 4 | __all__ = ['get_root_logger', 'collect_env'] 5 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from mmcv.utils import get_logger 4 | 5 | 6 | def get_root_logger(log_file=None, log_level=logging.INFO): 7 | """Get root logger. 8 | 9 | Args: 10 | log_file (str, optional): File path of log. Defaults to None. 11 | log_level (int, optional): The level of logger. 12 | Defaults to logging.INFO. 13 | 14 | Returns: 15 | :obj:`logging.Logger`: The obtained logger 16 | """ 17 | logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) 18 | 19 | return logger 20 | -------------------------------------------------------------------------------- /lvis1.0/mmdet/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | 3 | __version__ = '2.4.0' 4 | short_version = __version__ 5 | 6 | 7 | def parse_version_info(version_str): 8 | version_info = [] 9 | for x in version_str.split('.'): 10 | if x.isdigit(): 11 | version_info.append(int(x)) 12 | elif x.find('rc') != -1: 13 | patch_version = x.split('rc') 14 | version_info.append(int(patch_version[0])) 15 | version_info.append(f'rc{patch_version[1]}') 16 | return tuple(version_info) 17 | 18 | 19 | version_info = parse_version_info(__version__) 20 | -------------------------------------------------------------------------------- /lvis1.0/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /lvis1.0/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/build.txt 2 | -r requirements/optional.txt 3 | -r requirements/runtime.txt 4 | -r requirements/tests.txt 5 | -------------------------------------------------------------------------------- /lvis1.0/requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | cython 3 | numpy 4 | -------------------------------------------------------------------------------- /lvis1.0/requirements/docs.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /lvis1.0/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | cityscapesscripts 3 | imagecorruptions 4 | mmlvis 5 | -------------------------------------------------------------------------------- /lvis1.0/requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | torch 3 | torchvision 4 | 
-------------------------------------------------------------------------------- /lvis1.0/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mmpycocotools 3 | numpy 4 | six 5 | terminaltables 6 | -------------------------------------------------------------------------------- /lvis1.0/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | interrogate 5 | isort==4.3.21 6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 7 | kwarray 8 | pytest 9 | ubelt 10 | xdoctest>=0.10.0 11 | yapf 12 | -------------------------------------------------------------------------------- /lvis1.0/resources/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/resources/coco_test_12510.jpg -------------------------------------------------------------------------------- /lvis1.0/resources/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/resources/corruptions_sev_3.png -------------------------------------------------------------------------------- /lvis1.0/resources/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/resources/data_pipeline.png -------------------------------------------------------------------------------- /lvis1.0/resources/loss_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/resources/loss_curve.png -------------------------------------------------------------------------------- /lvis1.0/resources/mmdet-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis1.0/resources/mmdet-logo.png -------------------------------------------------------------------------------- /lvis1.0/setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | known_standard_library = setuptools 5 | known_first_party = mmdet 6 | known_third_party = PIL,asynctest,cityscapesscripts,cv2,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,robustness_eval,seaborn,six,terminaltables,torch,torchvision 7 | no_lines_before = STDLIB,LOCALFOLDER 8 | default_section = THIRDPARTY 9 | 10 | [yapf] 11 | BASED_ON_STYLE = pep8 12 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 13 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 14 | -------------------------------------------------------------------------------- /lvis1.0/tests/test_data/test_formatting.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from mmcv.utils import build_from_cfg 4 | 5 | from mmdet.datasets.builder import PIPELINES 6 | 7 | 8 | def 
test_default_format_bundle(): 9 | results = dict( 10 | img_prefix=osp.join(osp.dirname(__file__), '../data'), 11 | img_info=dict(filename='color.jpg')) 12 | load = dict(type='LoadImageFromFile') 13 | load = build_from_cfg(load, PIPELINES) 14 | bundle = dict(type='DefaultFormatBundle') 15 | bundle = build_from_cfg(bundle, PIPELINES) 16 | results = load(results) 17 | assert 'pad_shape' not in results 18 | assert 'scale_factor' not in results 19 | assert 'img_norm_cfg' not in results 20 | results = bundle(results) 21 | assert 'pad_shape' in results 22 | assert 'scale_factor' in results 23 | assert 'img_norm_cfg' in results 24 | -------------------------------------------------------------------------------- /lvis1.0/tests/test_version.py: -------------------------------------------------------------------------------- 1 | from mmdet import digit_version 2 | 3 | 4 | def test_version_check(): 5 | assert digit_version('1.0.5') > digit_version('1.0.5rc0') 6 | assert digit_version('1.0.5') > digit_version('1.0.4rc0') 7 | assert digit_version('1.0.5') > digit_version('1.0rc0') 8 | assert digit_version('1.0.0') > digit_version('0.6.2') 9 | assert digit_version('1.0.0') > digit_version('0.2.16') 10 | assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0') 11 | assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0') 12 | assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0') 13 | assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1') 14 | assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1') 15 | assert digit_version('1.0.0') > digit_version('1.0.0rc1') 16 | -------------------------------------------------------------------------------- /lvis1.0/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | PORT=${PORT:-29500} 7 | 8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 9 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 11 | -------------------------------------------------------------------------------- /lvis1.0/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29500} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /lvis1.0/tools/print_config.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from mmcv import Config, DictAction 4 | 5 | 6 | def parse_args(): 7 | parser = argparse.ArgumentParser(description='Print the whole config') 8 | parser.add_argument('config', help='config file path') 9 | parser.add_argument( 10 | '--options', nargs='+', action=DictAction, help='arguments in dict') 11 | args = parser.parse_args() 12 | 13 | return args 14 | 15 | 16 | def main(): 17 | args = parse_args() 18 | 19 | cfg = Config.fromfile(args.config) 20 | if args.options is not None: 21 | cfg.merge_from_dict(args.options) 22 | print(f'Config:\n{cfg.pretty_text}') 23 | 24 | 25 | if __name__ == '__main__': 26 | main() 27 | -------------------------------------------------------------------------------- /lvis1.0/tools/slurm_test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /lvis1.0/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${@:5} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /lvis_old/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /lvis_old/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature** 11 | 12 | **Motivation** 13 | A clear and concise description of the motivation of the feature. 14 | Ex1. It is inconvenient when [....]. 15 | Ex2. There is a recent paper [....], which is very helpful for [....]. 16 | 17 | **Related resources** 18 | If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 22 | If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated. 
23 | -------------------------------------------------------------------------------- /lvis_old/.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /lvis_old/.isort.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | known_standard_library = setuptools 5 | known_first_party = mmdet 6 | known_third_party = asynctest,cv2,matplotlib,mmcv,numpy,pycocotools,robustness_eval,roi_align,roi_pool,seaborn,six,terminaltables,torch,torchvision 7 | no_lines_before = STDLIB,LOCALFOLDER 8 | default_section = THIRDPARTY 9 | -------------------------------------------------------------------------------- /lvis_old/.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/asottile/seed-isort-config 3 | rev: v1.9.3 4 | hooks: 5 | - id: seed-isort-config 6 | - repo: https://github.com/pre-commit/mirrors-isort 7 | rev: v4.3.21 8 | hooks: 9 | - id: isort 10 | - repo: https://github.com/pre-commit/mirrors-yapf 11 | rev: v0.29.0 12 | hooks: 13 | - id: yapf 14 | - repo: https://github.com/pre-commit/pre-commit-hooks 15 | rev: v2.4.0 16 | hooks: 17 | - id: flake8 18 | - id: trailing-whitespace 19 | - id: check-yaml 20 | - id: end-of-file-fixer 21 | - id: requirements-txt-fixer 22 | -------------------------------------------------------------------------------- /lvis_old/.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | BASED_ON_STYLE = pep8 3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 5 | -------------------------------------------------------------------------------- /lvis_old/configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/configs/reppoints/reppoints.png -------------------------------------------------------------------------------- /lvis_old/demo/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/demo/coco_test_12510.jpg -------------------------------------------------------------------------------- /lvis_old/demo/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/demo/corruptions_sev_3.png -------------------------------------------------------------------------------- /lvis_old/demo/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/demo/data_pipeline.png -------------------------------------------------------------------------------- /lvis_old/demo/demo.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/demo/demo.jpg -------------------------------------------------------------------------------- /lvis_old/demo/loss_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KaihuaTang/Long-Tailed-Recognition.pytorch/5c916d0d0fd5e1fa984621b86f15cda2bc69df17/lvis_old/demo/loss_curve.png -------------------------------------------------------------------------------- /lvis_old/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTORCH="1.3" 2 | ARG CUDA="10.1" 3 | ARG CUDNN="7" 4 | 5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel 6 | 7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" 8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" 9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" 10 | 11 | RUN apt-get update && apt-get install -y libglib2.0-0 libsm6 libxrender-dev libxext6 \ 12 | && apt-get clean \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | # Install mmdetection 16 | RUN conda clean --all 17 | RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection 18 | WORKDIR /mmdetection 19 | ENV FORCE_CUDA="1" 20 | RUN pip install --no-cache-dir -e . 21 | -------------------------------------------------------------------------------- /lvis_old/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /lvis_old/docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to MMDetection's documentation! 2 | ======================================= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | 7 | INSTALL.md 8 | GETTING_STARTED.md 9 | MODEL_ZOO.md 10 | TECHNICAL_DETAILS.md 11 | CHANGELOG.md 12 | 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`search` 20 | -------------------------------------------------------------------------------- /lvis_old/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result, show_result_pyplot) 3 | from .train import get_root_logger, set_random_seed, train_detector 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 7 | 'async_inference_detector', 'inference_detector', 'show_result', 8 | 'show_result_pyplot' 9 | ] 10 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .evaluation import * # noqa: F401, F403 4 | from .fp16 import * # noqa: F401, F403 5 | from .mask import * # noqa: F401, F403 6 | from .optimizer import * # noqa: F401, F403 7 | from .post_processing import * # noqa: F401, F403 8 | from .utils import * # noqa: F401, F403 9 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import AnchorGenerator 2 | from .anchor_target import (anchor_inside_flags, anchor_target, 3 | images_to_levels, unmap) 4 | from .guided_anchor_target import ga_loc_target, ga_shape_target 5 | from .point_generator import PointGenerator 6 | from .point_target import point_target 7 | 8 | __all__ = [ 9 | 'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target', 10 | 'ga_shape_target', 'PointGenerator', 'point_target', 'images_to_levels', 11 | 'unmap' 12 | ] 13 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 2 | from .assign_result import AssignResult 3 | from .atss_assigner import ATSSAssigner 4 | from .base_assigner import BaseAssigner 5 | from .max_iou_assigner import MaxIoUAssigner 6 | from .point_assigner import PointAssigner 7 | 8 | __all__ = [ 9 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 10 | 'PointAssigner', 'ATSSAssigner' 11 | ] 12 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/bbox/assigners/base_assigner.py: 
-------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | 6 | @abstractmethod 7 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 8 | pass 9 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .combined_sampler import CombinedSampler 3 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 4 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 5 | from .ohem_sampler import OHEMSampler 6 | from .pseudo_sampler import PseudoSampler 7 | from .random_sampler import RandomSampler 8 | from .sampling_result import SamplingResult 9 | 10 | __all__ = [ 11 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 12 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 13 | 'OHEMSampler', 'SamplingResult' 14 | ] 15 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from ..assign_sampling import build_sampler 2 | from .base_sampler import BaseSampler 3 | 4 | 5 | class CombinedSampler(BaseSampler): 6 | 7 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 8 | super(CombinedSampler, self).__init__(**kwargs) 9 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 10 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 11 | 12 | def _sample_pos(self, **kwargs): 13 | raise NotImplementedError 14 | 15 | def _sample_neg(self, **kwargs): 16 | raise NotImplementedError 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (coco_classes, dataset_aliases, get_classes, 2 | imagenet_det_classes, imagenet_vid_classes, 3 | voc_classes) 4 | from .eval_hooks import DistEvalHook 5 | from .mean_ap import average_precision, eval_map, print_map_summary 6 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 7 | print_recall_summary) 8 | 9 | __all__ = [ 10 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 11 | 'coco_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 12 | 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 13 | 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall' 14 | ] 15 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/fp16/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import auto_fp16, force_fp32 2 | from .hooks import Fp16OptimizerHook, wrap_fp16_model 3 | 4 | __all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/fp16/utils.py: -------------------------------------------------------------------------------- 1 | from collections import abc 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def cast_tensor_type(inputs, src_type, dst_type): 8 | if isinstance(inputs, torch.Tensor): 9 | return inputs.to(dst_type) 10 | elif 
isinstance(inputs, str): 11 | return inputs 12 | elif isinstance(inputs, np.ndarray): 13 | return inputs 14 | elif isinstance(inputs, abc.Mapping): 15 | return type(inputs)({ 16 | k: cast_tensor_type(v, src_type, dst_type) 17 | for k, v in inputs.items() 18 | }) 19 | elif isinstance(inputs, abc.Iterable): 20 | return type(inputs)( 21 | cast_tensor_type(item, src_type, dst_type) for item in inputs) 22 | else: 23 | return inputs 24 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_target import mask_target 2 | from .utils import split_combined_polys 3 | 4 | __all__ = ['split_combined_polys', 'mask_target'] 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_optimizer 2 | from .copy_of_sgd import CopyOfSGD 3 | from .registry import OPTIMIZERS 4 | 5 | __all__ = ['OPTIMIZERS', 'build_optimizer', 'CopyOfSGD'] 6 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/optimizer/copy_of_sgd.py: -------------------------------------------------------------------------------- 1 | from torch.optim import SGD 2 | 3 | from .registry import OPTIMIZERS 4 | 5 | 6 | @OPTIMIZERS.register_module 7 | class CopyOfSGD(SGD): 8 | """A clone of torch.optim.SGD. 9 | 10 | A customized optimizer could be defined like CopyOfSGD. 11 | You may derive from built-in optimizers in torch.optim, 12 | or directly implement a new optimizer. 13 | """ 14 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/optimizer/registry.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | 3 | import torch 4 | 5 | from mmdet.utils import Registry 6 | 7 | OPTIMIZERS = Registry('optimizer') 8 | 9 | 10 | def register_torch_optimizers(): 11 | torch_optimizers = [] 12 | for module_name in dir(torch.optim): 13 | if module_name.startswith('__'): 14 | continue 15 | _optim = getattr(torch.optim, module_name) 16 | if inspect.isclass(_optim) and issubclass(_optim, 17 | torch.optim.Optimizer): 18 | OPTIMIZERS.register_module(_optim) 19 | torch_optimizers.append(module_name) 20 | return torch_optimizers 21 | 22 | 23 | TORCH_OPTIMIZERS = register_torch_optimizers() 24 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import multiclass_nms 2 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 3 | merge_aug_proposals, merge_aug_scores) 4 | 5 | __all__ = [ 6 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 7 | 'merge_aug_scores', 'merge_aug_masks' 8 | ] 9 | -------------------------------------------------------------------------------- /lvis_old/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads 2 | from .misc import multi_apply, tensor2imgs, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap', 6 | 'multi_apply' 7 | ] 8 | 
-------------------------------------------------------------------------------- /lvis_old/mmdet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_dataset 2 | from .cityscapes import CityscapesDataset 3 | from .coco import CocoDataset 4 | from .custom import CustomDataset 5 | from .dataset_wrappers import ConcatDataset, RepeatDataset 6 | from .loader import DistributedGroupSampler, GroupSampler, build_dataloader 7 | from .registry import DATASETS 8 | from .voc import VOCDataset 9 | from .wider_face import WIDERFaceDataset 10 | from .xml_style import XMLDataset 11 | from .LVIS import LVISDataset 12 | 13 | __all__ = [ 14 | 'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 15 | 'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler', 16 | 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'WIDERFaceDataset', 17 | 'DATASETS', 'build_dataset', 'LVISDataset', 18 | ] 19 | -------------------------------------------------------------------------------- /lvis_old/mmdet/datasets/cityscapes.py: -------------------------------------------------------------------------------- 1 | from .coco import CocoDataset 2 | from .registry import DATASETS 3 | 4 | 5 | @DATASETS.register_module 6 | class CityscapesDataset(CocoDataset): 7 | 8 | CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 9 | 'bicycle') 10 | -------------------------------------------------------------------------------- /lvis_old/mmdet/datasets/loader/__init__.py: -------------------------------------------------------------------------------- 1 | from .build_loader import build_dataloader 2 | from .sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader'] 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/datasets/registry.py: -------------------------------------------------------------------------------- 1 | from mmdet.utils import Registry 2 | 3 | DATASETS = Registry('dataset') 4 | PIPELINES = Registry('pipeline') 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/anchor_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_head import AnchorHead 2 | from .atss_head import ATSSHead 3 | from .fcos_head import FCOSHead 4 | from .fovea_head import FoveaHead 5 | from .free_anchor_retina_head import FreeAnchorRetinaHead 6 | from .ga_retina_head import GARetinaHead 7 | from .ga_rpn_head import GARPNHead 8 | from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead 9 | from .reppoints_head import RepPointsHead 10 | from .retina_head import RetinaHead 11 | from .retina_sepbn_head import RetinaSepBNHead 12 | from .rpn_head import RPNHead 13 | from .ssd_head import SSDHead 14 | 15 | __all__ = [ 16 | 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 17 | 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead', 18 | 'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead', 19 | 'ATSSHead' 20 | ] 21 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .hrnet import HRNet 2 | from .resnet import ResNet, make_res_layer 3 | from .resnext import ResNeXt 4 | from .ssd_vgg import 
SSDVGG 5 | 6 | __all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet'] 7 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_head import BBoxHead 2 | from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead 3 | from .double_bbox_head import DoubleConvFCBBoxHead 4 | 5 | __all__ = [ 6 | 'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'DoubleConvFCBBoxHead' 7 | ] 8 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/detectors/atss.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class ATSS(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/detectors/fcos.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FCOS(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/detectors/fovea.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class FOVEA(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/detectors/retinanet.py: -------------------------------------------------------------------------------- 1 | from ..registry import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module 6 | class RetinaNet(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/losses/mse_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | from ..registry import LOSSES 5 | from .utils import weighted_loss 6 | 7 | mse_loss = weighted_loss(F.mse_loss) 8 | 9 | 10 | @LOSSES.register_module 11 | class MSELoss(nn.Module): 12 | 13 | def __init__(self, reduction='mean', loss_weight=1.0): 14 | super().__init__() 15 | self.reduction = 
reduction 16 | self.loss_weight = loss_weight 17 | 18 | def forward(self, pred, target, weight=None, avg_factor=None): 19 | loss = self.loss_weight * mse_loss( 20 | pred, 21 | target, 22 | weight, 23 | reduction=self.reduction, 24 | avg_factor=avg_factor) 25 | return loss 26 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .fcn_mask_head import FCNMaskHead 2 | from .fused_semantic_head import FusedSemanticHead 3 | from .grid_head import GridHead 4 | from .htc_mask_head import HTCMaskHead 5 | from .maskiou_head import MaskIoUHead 6 | 7 | __all__ = [ 8 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 9 | 'MaskIoUHead' 10 | ] 11 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .bfp import BFP 2 | from .fpn import FPN 3 | from .fpn_carafe import FPN_CARAFE 4 | from .hrfpn import HRFPN 5 | from .nas_fpn import NASFPN 6 | 7 | __all__ = ['FPN', 'BFP', 'HRFPN', 'NASFPN', 'FPN_CARAFE'] 8 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | from .generalized_attention import GeneralizedAttention 2 | from .non_local import NonLocal2D 3 | 4 | __all__ = ['NonLocal2D', 'GeneralizedAttention'] 5 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/registry.py: -------------------------------------------------------------------------------- 1 | from mmdet.utils import Registry 2 | 3 | BACKBONES = Registry('backbone') 4 | NECKS = Registry('neck') 5 | ROI_EXTRACTORS = Registry('roi_extractor') 6 | SHARED_HEADS = Registry('shared_head') 7 | HEADS = Registry('head') 8 | LOSSES = Registry('loss') 9 | DETECTORS = Registry('detector') 10 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .single_level import SingleRoIExtractor 2 | 3 | __all__ = ['SingleRoIExtractor'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_module import ConvModule, build_conv_layer 2 | from .conv_ws import ConvWS2d, conv_ws_2d 3 | from .norm import build_norm_layer 4 | from .scale import Scale 5 | from .upsample import build_upsample_layer 6 | from .weight_init import (bias_init_with_prob, kaiming_init, normal_init, 7 | uniform_init, xavier_init) 8 | 9 | __all__ = [ 10 | 'conv_ws_2d', 'ConvWS2d', 'build_conv_layer', 'ConvModule', 11 | 'build_norm_layer', 'build_upsample_layer', 'xavier_init', 'normal_init', 12 | 'uniform_init', 'kaiming_init', 'bias_init_with_prob', 'Scale' 13 | ] 14 | -------------------------------------------------------------------------------- 
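The CopyOfSGD / registry pair in mmdet/core/optimizer above is the extension hook for optimizers: any torch.optim.Optimizer subclass decorated with the parameter-free @OPTIMIZERS.register_module decorator becomes selectable by name from a config dict. Below is a minimal, hypothetical sketch of that workflow (the class name VerboseSGD and the toy model are not part of this repository, and it assumes the mmdet 1.x build_optimizer(model, optimizer_cfg) convention exported by the optimizer __init__.py above).

import torch
import torch.nn as nn

from mmdet.core.optimizer import OPTIMIZERS, build_optimizer


@OPTIMIZERS.register_module  # parameter-free decorator, as in copy_of_sgd.py
class VerboseSGD(torch.optim.SGD):
    """Toy custom optimizer: plain SGD registered under a new config name."""

    def step(self, closure=None):
        loss = super().step(closure)
        print('VerboseSGD: step taken')  # illustrative side effect only
        return loss


model = nn.Linear(8, 4)
# 'type' picks the class out of the OPTIMIZERS registry; the remaining keys
# are forwarded as keyword arguments to the optimizer constructor.
optimizer = build_optimizer(model, dict(type='VerboseSGD', lr=0.01, momentum=0.9))
optimizer.zero_grad()
nn.functional.mse_loss(model(torch.randn(2, 8)), torch.zeros(2, 4)).backward()
optimizer.step()

The same Registry / register_module pattern is what DATASETS, BACKBONES, HEADS, LOSSES and DETECTORS use elsewhere in mmdet, so the sketch carries over to those components as well.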
/lvis_old/mmdet/models/utils/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | """ 7 | A learnable scale parameter 8 | """ 9 | 10 | def __init__(self, scale=1.0): 11 | super(Scale, self).__init__() 12 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 13 | 14 | def forward(self, x): 15 | return x * self.scale 16 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/affine_grid/__init__.py: -------------------------------------------------------------------------------- 1 | from .affine_grid import affine_grid 2 | 3 | __all__ = ['affine_grid'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/carafe/__init__.py: -------------------------------------------------------------------------------- 1 | from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive 2 | 3 | __all__ = ['carafe', 'carafe_naive', 'CARAFE', 'CARAFENaive', 'CARAFEPack'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/dcn/__init__.py: -------------------------------------------------------------------------------- 1 | from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, 2 | ModulatedDeformConvPack, deform_conv, 3 | modulated_deform_conv) 4 | from .deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, 5 | ModulatedDeformRoIPoolingPack, deform_roi_pooling) 6 | 7 | __all__ = [ 8 | 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 9 | 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 10 | 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', 11 | 'deform_roi_pooling' 12 | ] 13 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/grid_sampler/__init__.py: -------------------------------------------------------------------------------- 1 | from .grid_sampler import grid_sample 2 | 3 | __all__ = ['grid_sample'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .masked_conv import MaskedConv2d, masked_conv2d 2 | 3 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import nms, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/nms/src/nms_cuda.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
2 | #include <torch/extension.h> 3 | 4 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 5 | 6 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); 7 | 8 | at::Tensor nms(const at::Tensor& dets, const float threshold) { 9 | CHECK_CUDA(dets); 10 | if (dets.numel() == 0) 11 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); 12 | return nms_cuda(dets, threshold); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("nms", &nms, "non-maximum suppression"); 17 | } -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align import RoIAlign, roi_align 2 | 3 | __all__ = ['roi_align', 'RoIAlign'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_pool import RoIPool, roi_pool 2 | 3 | __all__ = ['roi_pool', 'RoIPool'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/roi_pool/gradcheck.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | import torch 5 | from torch.autograd import gradcheck 6 | 7 | sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 8 | from roi_pool import RoIPool # noqa: E402, isort:skip 9 | 10 | feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda() 11 | rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55], 12 | [1, 67, 40, 110, 120]]).cuda() 13 | inputs = (feat, rois) 14 | print('Gradcheck for roi pooling...') 15 | test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3) 16 | print(test) 17 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/scale.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class Scale(nn.Module): 6 | """ 7 | A learnable scale parameter 8 | """ 9 | 10 | def __init__(self, scale=1.0): 11 | super(Scale, self).__init__() 12 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 13 | 14 | def forward(self, x): 15 | return x * self.scale 16 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss 2 | 3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /lvis_old/mmdet/ops/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # from .
import compiling_info 2 | from .compiling_info import get_compiler_version, get_compiling_cuda_version 3 | 4 | # get_compiler_version = compiling_info.get_compiler_version 5 | # get_compiling_cuda_version = compiling_info.get_compiling_cuda_version 6 | 7 | __all__ = ['get_compiler_version', 'get_compiling_cuda_version'] 8 | -------------------------------------------------------------------------------- /lvis_old/mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .flops_counter import get_model_complexity_info 3 | from .logger import get_root_logger, print_log 4 | from .registry import Registry, build_from_cfg 5 | 6 | __all__ = [ 7 | 'Registry', 'build_from_cfg', 'get_model_complexity_info', 8 | 'get_root_logger', 'print_log', 'collect_env' 9 | ] 10 | -------------------------------------------------------------------------------- /lvis_old/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /lvis_old/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/runtime.txt 2 | -r requirements/optional.txt 3 | -r requirements/tests.txt 4 | -r requirements/build.txt 5 | -------------------------------------------------------------------------------- /lvis_old/requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | numpy 3 | torch>=1.1 4 | -------------------------------------------------------------------------------- /lvis_old/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | imagecorruptions 3 | -------------------------------------------------------------------------------- /lvis_old/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mmcv>=0.3.1 3 | numpy 4 | # need older pillow until torchvision is fixed 5 | Pillow<=6.2.2 6 | six 7 | terminaltables 8 | torch>=1.1 9 | torchvision 10 | -------------------------------------------------------------------------------- /lvis_old/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | isort 5 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
6 | kwarray 7 | pytest 8 | pytest-cov 9 | pytest-runner 10 | ubelt 11 | xdoctest >= 0.10.0 12 | yapf 13 | -------------------------------------------------------------------------------- /lvis_old/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy.testing as npt 2 | 3 | from mmdet.utils.flops_counter import params_to_string 4 | 5 | 6 | def test_params_to_string(): 7 | npt.assert_equal(params_to_string(1e9), '1000.0 M') 8 | npt.assert_equal(params_to_string(2e5), '200.0 k') 9 | npt.assert_equal(params_to_string(3e-9), '3e-09') 10 | -------------------------------------------------------------------------------- /lvis_old/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | CHECKPOINT=$2 7 | GPUS=$3 8 | PORT=${PORT:-29500} 9 | 10 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 11 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 12 | -------------------------------------------------------------------------------- /lvis_old/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PYTHON=${PYTHON:-"python"} 4 | 5 | CONFIG=$1 6 | GPUS=$2 7 | PORT=${PORT:-29500} 8 | 9 | $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 11 | -------------------------------------------------------------------------------- /lvis_old/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /lvis_old/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${5:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${PY_ARGS:-"--validate"} 14 | 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 24 | --------------------------------------------------------------------------------
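The dist_*.sh and slurm_*.sh launchers above take only positional arguments plus a few environment-variable overrides (GPUS, GPUS_PER_NODE, CPUS_PER_TASK, PORT, SRUN_ARGS). A hedged usage sketch, run from the corresponding sub-project root; the partition, job name, config path and work directory below are placeholders rather than values shipped with this repository:

# Single-node training with torch.distributed.launch on 8 GPUs:
./tools/dist_train.sh configs/your_config.py 8

# Distributed testing; anything after the GPU count is forwarded to tools/test.py:
./tools/dist_test.sh configs/your_config.py work_dirs/your_config/latest.pth 8 --out results.pkl

# Slurm training (lvis1.0 variant: partition, job name, config, work dir as
# positionals; GPUS / GPUS_PER_NODE / CPUS_PER_TASK default to 8 / 8 / 5):
GPUS=16 GPUS_PER_NODE=8 ./tools/slurm_train.sh your_partition your_job \
    configs/your_config.py ./work_dirs/your_job

Note that the lvis_old slurm_train.sh differs slightly from the lvis1.0 one: it reads the GPU count from the fifth positional argument, defaults PY_ARGS to --validate, and passes --work_dir (underscore) instead of --work-dir.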