├── Change Detection
├── LICENSE
├── README.md
├── eval.py
├── metadata.json
├── models
│ ├── Models.py
│ ├── ViTAE_Window_NoShift
│ │ ├── NormalCell.py
│ │ ├── ReductionCell.py
│ │ ├── SELayer.py
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── NormalCell.cpython-37.pyc
│ │ │ ├── NormalCell.cpython-38.pyc
│ │ │ ├── ReductionCell.cpython-37.pyc
│ │ │ ├── ReductionCell.cpython-38.pyc
│ │ │ ├── SELayer.cpython-37.pyc
│ │ │ ├── SELayer.cpython-38.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── base_model.cpython-37.pyc
│ │ │ ├── base_model.cpython-38.pyc
│ │ │ ├── models.cpython-37.pyc
│ │ │ ├── models.cpython-38.pyc
│ │ │ ├── swin.cpython-37.pyc
│ │ │ ├── swin.cpython-38.pyc
│ │ │ ├── token_performer.cpython-37.pyc
│ │ │ ├── token_performer.cpython-38.pyc
│ │ │ ├── token_transformer.cpython-37.pyc
│ │ │ └── token_transformer.cpython-38.pyc
│ │ ├── base_model.py
│ │ ├── models.py
│ │ ├── swin.py
│ │ ├── token_performer.py
│ │ └── token_transformer.py
│ ├── __pycache__
│ │ ├── Models.cpython-38.pyc
│ │ ├── networks.cpython-38.pyc
│ │ ├── resnet.cpython-38.pyc
│ │ ├── siamunet_dif.cpython-38.pyc
│ │ └── swin_transformer.cpython-38.pyc
│ ├── networks.py
│ ├── resnet.py
│ ├── siamunet_dif.py
│ └── swin_transformer.py
├── train.py
├── utils
│ ├── __pycache__
│ │ ├── dataloaders.cpython-38.pyc
│ │ ├── helpers.cpython-38.pyc
│ │ ├── losses.cpython-38.pyc
│ │ ├── metrics.cpython-38.pyc
│ │ ├── parser.cpython-38.pyc
│ │ └── transforms.cpython-38.pyc
│ ├── dataloaders.py
│ ├── helpers.py
│ ├── losses.py
│ ├── metrics.py
│ ├── parser.py
│ └── transforms.py
└── visualization.py
├── Figs
├── aerialscene.png
├── cd.png
├── det.png
├── seg.png
└── video.png
├── LICENSE
├── Object Detection
├── .github
│ ├── CODE_OF_CONDUCT.md
│ ├── CONTRIBUTING.md
│ ├── ISSUE_TEMPLATE
│ │ ├── config.yml
│ │ ├── error-report.md
│ │ ├── feature_request.md
│ │ ├── general_questions.md
│ │ └── reimplementation_questions.md
│ └── workflows
│ │ └── build.yml
├── .gitignore
├── .gitmodules
├── .isort.cfg
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── .style.yapf
├── BboxToolkit
│ ├── .gitignore
│ ├── BboxToolkit
│ │ ├── __init__.py
│ │ ├── datasets
│ │ │ ├── DIORio.py
│ │ │ ├── DOTAio.py
│ │ │ ├── HRSCio.py
│ │ │ ├── MSRA_TD500io.py
│ │ │ ├── RCTW_17io.py
│ │ │ ├── VOCio.py
│ │ │ ├── __init__.py
│ │ │ ├── io.py
│ │ │ └── misc.py
│ │ ├── evaluation
│ │ │ ├── __init__.py
│ │ │ ├── mean_ap.py
│ │ │ └── recall.py
│ │ ├── geometry.py
│ │ ├── move.py
│ │ ├── transforms.py
│ │ ├── utils.py
│ │ └── visualization
│ │ │ ├── __init__.py
│ │ │ ├── colors.py
│ │ │ ├── draw.py
│ │ │ └── show.py
│ ├── LICENSE
│ ├── README.md
│ ├── USAGE.md
│ ├── definition.png
│ ├── setup.py
│ └── tools
│ │ ├── cal_mAP.py
│ │ ├── img_split.py
│ │ ├── split_configs
│ │ ├── dota1_0
│ │ │ ├── 600_test.json
│ │ │ ├── 600_train.json
│ │ │ ├── 600_trainval.json
│ │ │ ├── 600_val.json
│ │ │ ├── ms_test.json
│ │ │ ├── ms_train.json
│ │ │ ├── ms_trainval.json
│ │ │ ├── ms_val.json
│ │ │ ├── ss_test.json
│ │ │ ├── ss_train.json
│ │ │ ├── ss_trainval.json
│ │ │ └── ss_val.json
│ │ ├── dota1_5
│ │ │ ├── 600_test.json
│ │ │ ├── 600_train.json
│ │ │ ├── 600_trainval.json
│ │ │ ├── 600_val.json
│ │ │ ├── ms_test.json
│ │ │ ├── ms_train.json
│ │ │ ├── ms_trainval.json
│ │ │ ├── ms_val.json
│ │ │ ├── ss_test.json
│ │ │ ├── ss_train.json
│ │ │ ├── ss_trainval.json
│ │ │ └── ss_val.json
│ │ └── dota2_0
│ │ │ ├── 600_test.json
│ │ │ ├── 600_train.json
│ │ │ ├── 600_trainval.json
│ │ │ ├── 600_val.json
│ │ │ ├── ms_test.json
│ │ │ ├──
ms_train.json │ │ │ ├── ms_trainval.json │ │ │ ├── ms_val.json │ │ │ ├── ss_test.json │ │ │ ├── ss_train.json │ │ │ ├── ss_trainval.json │ │ │ └── ss_val.json │ │ ├── vis_configs │ │ ├── dior │ │ │ ├── colors.txt │ │ │ ├── config.json │ │ │ └── short_names.txt │ │ ├── dota1_0 │ │ │ ├── colors.txt │ │ │ ├── config.json │ │ │ └── short_names.txt │ │ └── hrsc │ │ │ ├── config.json │ │ │ └── short_names.txt │ │ └── visualize.py ├── LICENSE ├── README.md ├── configs │ ├── _base_ │ │ ├── datasets │ │ │ ├── cityscapes_detection.py │ │ │ ├── cityscapes_instance.py │ │ │ ├── coco_detection.py │ │ │ ├── coco_instance.py │ │ │ ├── coco_instance_semantic.py │ │ │ ├── deepfashion.py │ │ │ ├── lvis_instance.py │ │ │ ├── voc0712.py │ │ │ └── wider_face.py │ │ ├── default_runtime.py │ │ ├── models │ │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ │ ├── cascade_rcnn_r50_fpn.py │ │ │ ├── fast_rcnn_r50_fpn.py │ │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ │ ├── faster_rcnn_r50_fpn.py │ │ │ ├── mask_rcnn_r50_caffe_c4.py │ │ │ ├── mask_rcnn_r50_fpn.py │ │ │ ├── retinanet_r50_fpn.py │ │ │ ├── rpn_r50_caffe_c4.py │ │ │ ├── rpn_r50_fpn.py │ │ │ └── ssd300.py │ │ └── schedules │ │ │ ├── schedule_1x.py │ │ │ ├── schedule_20e.py │ │ │ └── schedule_2x.py │ ├── albu_example │ │ ├── README.md │ │ └── mask_rcnn_r50_fpn_albu_1x_coco.py │ ├── atss │ │ ├── README.md │ │ └── atss_r50_fpn_1x_coco.py │ ├── carafe │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py │ │ └── mask_rcnn_r50_fpn_carafe_1x_coco.py │ ├── cascade_rcnn │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py │ │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_20e_coco.py │ │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_20e_coco.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── cascade_rcnn_x101_64x4d_fpn_20e_coco.py │ ├── cityscapes │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py │ ├── dcn │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ └── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ ├── deepfashion │ │ ├── README.md │ │ └── mask_rcnn_r50_fpn_15e_deepfashion.py │ ├── detectors │ │ ├── README.md │ │ ├── 
cascade_rcnn_r50_rfp_1x_coco.py │ │ ├── cascade_rcnn_r50_sac_1x_coco.py │ │ ├── detectors_cascade_rcnn_r50_1x_coco.py │ │ ├── detectors_htc_r50_1x_coco.py │ │ ├── htc_r50_rfp_1x_coco.py │ │ └── htc_r50_sac_1x_coco.py │ ├── double_heads │ │ ├── README.md │ │ └── dh_faster_rcnn_r50_fpn_1x_coco.py │ ├── dynamic_rcnn │ │ ├── README.md │ │ └── dynamic_rcnn_r50_fpn_1x.py │ ├── empirical_attention │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py │ │ └── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py │ ├── fast_rcnn │ │ ├── README.md │ │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── fast_rcnn_r101_fpn_1x_coco.py │ │ ├── fast_rcnn_r101_fpn_2x_coco.py │ │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── fast_rcnn_r50_fpn_1x_coco.py │ │ └── fast_rcnn_r50_fpn_2x_coco.py │ ├── faster_rcnn │ │ ├── README.md │ │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_1x_coco.py │ │ ├── faster_rcnn_r101_fpn_2x_coco.py │ │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ ├── faster_rcnn_r50_fpn_1x_coco-person-bicycle-car.py │ │ ├── faster_rcnn_r50_fpn_1x_coco-person.py │ │ ├── faster_rcnn_r50_fpn_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_2x_coco.py │ │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── faster_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── fcos │ │ ├── README.md │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_4x4_1x_coco.py │ │ ├── fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py │ │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_r50_caffe_fpn_4x4_1x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py │ │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ └── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_4x2_2x_coco.py │ ├── foveabox │ │ ├── README.md │ │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py │ │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py │ │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fovea_r101_fpn_4x4_1x_coco.py │ │ ├── fovea_r101_fpn_4x4_2x_coco.py │ │ ├── fovea_r50_fpn_4x4_1x_coco.py │ │ └── fovea_r50_fpn_4x4_2x_coco.py │ ├── fp16 │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py │ │ └── retinanet_r50_fpn_fp16_1x_coco.py │ ├── free_anchor │ │ ├── README.md │ │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py │ │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py │ │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py │ ├── fsaf │ │ ├── README.md │ │ ├── fsaf_r101_fpn_1x_coco.py │ │ ├── fsaf_r50_fpn_1x_coco.py │ │ └── fsaf_x101_64x4d_fpn_1x_coco.py │ ├── gcnet │ │ ├── README.md │ │ ├── 
cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ └── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ ├── gfl │ │ ├── README.md │ │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ ├── gfl_r101_fpn_mstrain_2x_coco.py │ │ ├── gfl_r50_fpn_1x_coco.py │ │ ├── gfl_r50_fpn_mstrain_2x_coco.py │ │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ └── gfl_x101_32x4d_fpn_mstrain_2x_coco.py │ ├── ghm │ │ ├── README.md │ │ ├── retinanet_ghm_r101_fpn_1x_coco.py │ │ ├── retinanet_ghm_r50_fpn_1x_coco.py │ │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py │ │ └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py │ ├── gn+ws │ │ ├── README.md │ │ ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py │ │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ └── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py │ ├── gn │ │ ├── README.md │ │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py │ │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py │ │ └── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py │ ├── grid_rcnn │ │ ├── README.md │ │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py │ │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py │ │ └── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py │ ├── groie │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py │ │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py │ │ └── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ ├── guided_anchoring │ │ ├── README.md │ │ ├── ga_fast_r50_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r101_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r50_caffe_fpn_1x_coco.py │ │ ├── ga_faster_r50_fpn_1x_coco.py │ 
│ ├── ga_faster_x101_32x4d_fpn_1x_coco.py │ │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py │ │ ├── ga_retinanet_r101_caffe_fpn_1x_coco.py │ │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py │ │ ├── ga_retinanet_r50_fpn_1x_coco.py │ │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py │ │ ├── ga_rpn_r101_caffe_fpn_1x_coco.py │ │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py │ │ ├── ga_rpn_r50_fpn_1x_coco.py │ │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py │ │ └── ga_rpn_x101_64x4d_fpn_1x_coco.py │ ├── hrnet │ │ ├── README.md │ │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py │ │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py │ │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py │ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py │ │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ ├── htc_hrnetv2p_w18_20e_coco.py │ │ ├── htc_hrnetv2p_w32_20e_coco.py │ │ ├── htc_hrnetv2p_w40_20e_coco.py │ │ ├── htc_hrnetv2p_w40_28e_coco.py │ │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py │ │ ├── mask_rcnn_hrnetv2p_w18_1x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py │ │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py │ │ └── mask_rcnn_hrnetv2p_w40_2x_coco.py │ ├── htc │ │ ├── README.md │ │ ├── htc_r101_fpn_20e_coco.py │ │ ├── htc_r50_fpn_1x_coco.py │ │ ├── htc_r50_fpn_20e_coco.py │ │ ├── htc_without_semantic_r50_fpn_1x_coco.py │ │ ├── htc_x101_32x4d_fpn_16x1_20e_coco.py │ │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py │ │ └── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py │ ├── instaboost │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ └── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ ├── legacy_1.x │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py │ │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py │ │ ├── retinanet_r50_fpn_1x_coco_v1.py │ │ └── ssd300_coco_v1.py │ ├── libra_rcnn │ │ ├── README.md │ │ ├── libra_fast_rcnn_r50_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── libra_retinanet_r50_fpn_1x_coco.py │ ├── lvis │ │ ├── README.md │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis.py │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis.py │ │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis.py │ ├── mask_rcnn │ │ ├── README.md │ │ ├── 
mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_1x_coco.py │ │ ├── mask_rcnn_r101_fpn_2x_coco.py │ │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py │ │ ├── mask_rcnn_r50_fpn_1x_coco.py │ │ ├── mask_rcnn_r50_fpn_2x_coco.py │ │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py │ │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── mask_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── ms_rcnn │ │ ├── README.md │ │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py │ │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py │ │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py │ │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py │ │ ├── ms_rcnn_r50_fpn_1x_coco.py │ │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py │ │ └── ms_rcnn_x101_64x4d_fpn_2x_coco.py │ ├── nas_fcos │ │ ├── README.md │ │ ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ ├── nas_fpn │ │ ├── README.md │ │ ├── retinanet_r50_fpn_crop640_50e_coco.py │ │ └── retinanet_r50_nasfpn_crop640_50e_coco.py │ ├── obb │ │ ├── _base_ │ │ │ ├── datasets │ │ │ │ ├── dior.py │ │ │ │ ├── dota.py │ │ │ │ ├── hrsc.py │ │ │ │ ├── isaid.py │ │ │ │ ├── msra_td500.py │ │ │ │ └── rctw17.py │ │ │ └── schedules │ │ │ │ ├── schedule_1x.py │ │ │ │ ├── schedule_2x.py │ │ │ │ └── schedule_3x.py │ │ ├── atss_obb │ │ │ └── README.md │ │ ├── double_heads_obb │ │ │ ├── README.md │ │ │ └── dh_faster_rcnn_obb_r50_fpn_1x_dota10.py │ │ ├── faster_rcnn_obb │ │ │ ├── README.md │ │ │ ├── faster_rcnn_obb_r101_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_obb_r101_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_obb_r50_fpn_1x_dota10.py │ │ │ └── faster_rcnn_obb_r50_fpn_3x_hrsc.py │ │ ├── fcos_obb │ │ │ ├── README.md │ │ │ └── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_dota10.py │ │ ├── gliding_vertex │ │ │ ├── README.md │ │ │ ├── gliding_vertex_r101_fpn_1x_dota10.py │ │ │ ├── gliding_vertex_r101_fpn_3x_hrsc.py │ │ │ ├── gliding_vertex_r50_fpn_1x_dota10.py │ │ │ └── gliding_vertex_r50_fpn_3x_hrsc.py │ │ ├── oriented_rcnn │ │ │ ├── README.md │ │ │ ├── faster_rcnn_orpn_our_imp_swin_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_our_imp_swin_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_our_imp_vitae_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_our_imp_vitae_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_our_rsp_swin_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_our_rsp_swin_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_our_rsp_vitae_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_our_rsp_vitae_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_r101_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_r101_fpn_1x_ms_rr_dota10.py │ │ │ ├── faster_rcnn_orpn_r101_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_r50_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_r50_fpn_1x_ms_rr_dota10.py │ │ │ ├── faster_rcnn_orpn_r50_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_rsp_r50_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_rsp_r50_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_orpn_seco_r50_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_orpn_seco_r50_fpn_3x_hrsc.py │ │ │ ├── illustration.jpg │ │ │ ├── 
orpn_r101_fpn_1x_dota10.py │ │ │ └── orpn_r50_fpn_1x_dota10.py │ │ ├── poly_iou_loss │ │ │ ├── README.md │ │ │ ├── retinanet_obb_r50_fpn_giouloss_1x_dota.py │ │ │ └── retinanet_obb_r50_fpn_iouloss_1x_dota.py │ │ ├── random_fp │ │ │ ├── README.md │ │ │ ├── datasets │ │ │ │ ├── 01fp_dota10.py │ │ │ │ ├── 03fp_dota10.py │ │ │ │ └── 05fp_dota10.py │ │ │ ├── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_01fp_dota10.py │ │ │ ├── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_03fp_dota10.py │ │ │ └── fcos_obb_r50_caffe_fpn_gn-head_4x4_1x_05fp_dota10.py │ │ ├── retinanet_obb │ │ │ ├── README.md │ │ │ ├── retinanet_obb_r101_fpn_1x_dota10.py │ │ │ ├── retinanet_obb_r101_fpn_2x_dota10.py │ │ │ ├── retinanet_obb_r101_fpn_3x_hrsc.py │ │ │ ├── retinanet_obb_r50_fpn_1x_dota10.py │ │ │ ├── retinanet_obb_r50_fpn_2x_dota10.py │ │ │ └── retinanet_obb_r50_fpn_3x_hrsc.py │ │ ├── roi_transformer │ │ │ ├── README.md │ │ │ ├── faster_rcnn_roitrans_r101_fpn_1x_dota10.py │ │ │ ├── faster_rcnn_roitrans_r101_fpn_3x_hrsc.py │ │ │ ├── faster_rcnn_roitrans_r50_fpn_1x_dota10.py │ │ │ └── faster_rcnn_roitrans_r50_fpn_3x_hrsc.py │ │ └── s2anet │ │ │ ├── README.md │ │ │ ├── s2anet_our_imp_r50_fpn_1x_dota10.py │ │ │ ├── s2anet_our_rsp_r50_fpn_1x_dota10.py │ │ │ ├── s2anet_our_rsp_swin_fpn_1x_dota10.py │ │ │ ├── s2anet_our_rsp_vitae_fpn_1x_dota10.py │ │ │ ├── s2anet_our_seco_r50_fpn_1x_dota10.py │ │ │ └── s2anet_r50_fpn_1x_dota10.py │ ├── pafpn │ │ ├── README.md │ │ └── faster_rcnn_r50_pafpn_1x_coco.py │ ├── pascal_voc │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ ├── retinanet_r50_fpn_1x_voc0712.py │ │ ├── ssd300_voc0712.py │ │ └── ssd512_voc0712.py │ ├── pisa │ │ ├── README.md │ │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py │ │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py │ │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_retinanet_r50_fpn_1x_coco.py │ │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── pisa_ssd300_coco.py │ │ └── pisa_ssd512_coco.py │ ├── point_rend │ │ ├── README.md │ │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py │ │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py │ ├── regnet │ │ ├── README.md │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ ├── faster_rcnn_regnetx-3GF_fpn_mstrain_3x_coco.py │ │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py │ │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py │ │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py │ │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py │ │ └── retinanet_regnetx-800MF_fpn_1x_coco.py │ ├── reppoints │ │ ├── README.md │ │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py │ │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints.png │ │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_r50_fpn_1x_coco.py │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py │ │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py │ ├── res2net │ │ ├── README.md │ │ ├── 
cascade_mask_rcnn_r2_101_fpn_20e_coco.py │ │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py │ │ ├── faster_rcnn_r2_101_fpn_2x_coco.py │ │ ├── htc_r2_101_fpn_20e_coco.py │ │ └── mask_rcnn_r2_101_fpn_2x_coco.py │ ├── retinanet │ │ ├── README.md │ │ ├── retinanet_r101_caffe_fpn_1x_coco.py │ │ ├── retinanet_r101_fpn_1x_coco.py │ │ ├── retinanet_r101_fpn_2x_coco.py │ │ ├── retinanet_r50_caffe_fpn_1x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py │ │ ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py │ │ ├── retinanet_r50_fpn_1x_coco.py │ │ ├── retinanet_r50_fpn_2x_coco.py │ │ ├── retinanet_x101_32x4d_fpn_1x_coco.py │ │ ├── retinanet_x101_32x4d_fpn_2x_coco.py │ │ ├── retinanet_x101_64x4d_fpn_1x_coco.py │ │ └── retinanet_x101_64x4d_fpn_2x_coco.py │ ├── rpn │ │ ├── README.md │ │ ├── rpn_r101_caffe_fpn_1x_coco.py │ │ ├── rpn_r101_fpn_1x_coco.py │ │ ├── rpn_r101_fpn_2x_coco.py │ │ ├── rpn_r50_caffe_c4_1x_coco.py │ │ ├── rpn_r50_caffe_fpn_1x_coco.py │ │ ├── rpn_r50_fpn_1x_coco.py │ │ ├── rpn_r50_fpn_2x_coco.py │ │ ├── rpn_x101_32x4d_fpn_1x_coco.py │ │ ├── rpn_x101_32x4d_fpn_2x_coco.py │ │ ├── rpn_x101_64x4d_fpn_1x_coco.py │ │ └── rpn_x101_64x4d_fpn_2x_coco.py │ ├── scratch │ │ ├── README.md │ │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ └── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ ├── ssd │ │ ├── README.md │ │ ├── ssd300_coco.py │ │ └── ssd512_coco.py │ └── wider_face │ │ ├── README.md │ │ └── ssd300_wider_face.py ├── demo │ ├── coco_test_12510.jpg │ ├── corruptions_sev_3.png │ ├── data_pipeline.png │ ├── demo.jpg │ ├── dota_demo.png │ ├── huge_image_demo.py │ ├── image_demo.py │ ├── inference_demo.ipynb │ ├── loss_curve.png │ ├── mmdet_inference_colab.ipynb │ ├── obbdet_show.jpg │ └── webcam_demo.py ├── docker │ └── Dockerfile ├── docs │ ├── Makefile │ ├── api.rst │ ├── changelog.md │ ├── compatibility.md │ ├── conf.py │ ├── config.md │ ├── getting_started.md │ ├── index.rst │ ├── install.md │ ├── make.bat │ ├── model_zoo.md │ ├── oriented_model_starting.md │ ├── projects.md │ ├── robustness_benchmarking.md │ └── tutorials │ │ ├── data_pipeline.md │ │ ├── finetune.md │ │ ├── new_dataset.md │ │ └── new_modules.md ├── mmcv_custom │ ├── __init__.py │ └── checkpoint.py ├── mmdet │ ├── VERSION │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ └── huge_img_inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── anchor │ │ │ ├── __init__.py │ │ │ ├── anchor_generator.py │ │ │ ├── builder.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ └── theta0_anchor_generator.py │ │ │ ├── point_generator.py │ │ │ └── utils.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ ├── assign_result.py │ │ │ │ ├── atss_assigner.py │ │ │ │ ├── base_assigner.py │ │ │ │ ├── center_region_assigner.py │ │ │ │ ├── max_iou_assigner.py │ │ │ │ ├── obb2hbb_max_iou_assigner.py │ │ │ │ └── point_assigner.py │ │ │ ├── builder.py │ │ │ ├── coder │ │ │ │ ├── __init__.py │ │ │ │ ├── base_bbox_coder.py │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ ├── obb │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── gliding_vertex_coders.py │ │ │ │ │ ├── hbb2obb_delta_xywht_coder.py │ │ │ │ │ ├── midpoint_offset_coder.py │ │ │ │ │ └── obb2obb_delta_xywht_coder.py │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ └── tblr_bbox_coder.py │ │ │ ├── demodata.py │ │ │ ├── iou_calculators │ │ │ │ ├── 
__init__.py │ │ │ │ ├── builder.py │ │ │ │ ├── iou2d_calculator.py │ │ │ │ └── obb │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── obbiou_calculator.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── base_sampler.py │ │ │ │ ├── combined_sampler.py │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ ├── obb │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── obb_base_sampler.py │ │ │ │ │ ├── obb_ohem_sampler.py │ │ │ │ │ ├── obb_random_sampler.py │ │ │ │ │ └── obb_sampling_result.py │ │ │ │ ├── ohem_sampler.py │ │ │ │ ├── pseudo_sampler.py │ │ │ │ ├── random_sampler.py │ │ │ │ ├── sampling_result.py │ │ │ │ └── score_hlr_sampler.py │ │ │ ├── transforms.py │ │ │ └── transforms_obb │ │ │ │ ├── __init__.py │ │ │ │ ├── form.py │ │ │ │ ├── mapping.py │ │ │ │ └── misc.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── bbox_overlaps.py │ │ │ ├── class_names.py │ │ │ ├── eval_hooks.py │ │ │ ├── mean_ap.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── obb_mean_ap.py │ │ │ │ └── obb_recall.py │ │ │ └── recall.py │ │ ├── fp16 │ │ │ ├── __init__.py │ │ │ ├── decorators.py │ │ │ ├── hooks.py │ │ │ └── utils.py │ │ ├── hooks │ │ │ ├── __init__.py │ │ │ └── random_fp.py │ │ ├── mask │ │ │ ├── __init__.py │ │ │ ├── mask_target.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ └── obb_mask_target.py │ │ │ ├── structures.py │ │ │ └── utils.py │ │ ├── post_processing │ │ │ ├── __init__.py │ │ │ ├── bbox_nms.py │ │ │ ├── merge_augs.py │ │ │ └── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── obb_merge_augs.py │ │ │ │ └── obb_nms.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── dist_utils.py │ │ │ └── misc.py │ ├── datasets │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── cityscapes.py │ │ ├── coco.py │ │ ├── custom.py │ │ ├── dataset_wrappers.py │ │ ├── deepfashion.py │ │ ├── lvis.py │ │ ├── obb │ │ │ ├── __init__.py │ │ │ ├── dior.py │ │ │ ├── dota.py │ │ │ ├── hrsc.py │ │ │ ├── isaid.py │ │ │ ├── msra_td500.py │ │ │ └── rctw17.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── auto_augment.py │ │ │ ├── compose.py │ │ │ ├── formating.py │ │ │ ├── instaboost.py │ │ │ ├── loading.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── dota.py │ │ │ │ └── misc.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ ├── distributed_sampler.py │ │ │ └── group_sampler.py │ │ ├── voc.py │ │ ├── wider_face.py │ │ └── xml_style.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── ViTAE_Window_NoShift │ │ │ │ ├── NormalCell.py │ │ │ │ ├── ReductionCell.py │ │ │ │ ├── SELayer.py │ │ │ │ ├── __init__.py │ │ │ │ ├── base_model.py │ │ │ │ ├── models.py │ │ │ │ ├── swin.py │ │ │ │ ├── token_performer.py │ │ │ │ └── token_transformer.py │ │ │ ├── __init__.py │ │ │ ├── detectors_resnet.py │ │ │ ├── detectors_resnext.py │ │ │ ├── hourglass.py │ │ │ ├── hrnet.py │ │ │ ├── our_resnet.py │ │ │ ├── regnet.py │ │ │ ├── res2net.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ ├── ssd_vgg.py │ │ │ └── swin_transformer.py │ │ ├── builder.py │ │ ├── dense_heads │ │ │ ├── __init__.py │ │ │ ├── anchor_free_head.py │ │ │ ├── anchor_head.py │ │ │ ├── atss_head.py │ │ │ ├── base_dense_head.py │ │ │ ├── fcos_head.py │ │ │ ├── fovea_head.py │ │ │ ├── free_anchor_retina_head.py │ │ │ ├── fsaf_head.py │ │ │ ├── ga_retina_head.py │ │ │ ├── ga_rpn_head.py │ │ │ ├── gfl_head.py │ │ │ ├── guided_anchor_head.py │ │ │ ├── nasfcos_head.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── obb_anchor_free_head.py │ │ │ │ ├── obb_anchor_head.py │ │ │ │ ├── 
obb_fcos_head.py │ │ │ │ ├── obb_retina_head.py │ │ │ │ ├── odm_head.py │ │ │ │ ├── oriented_rpn_head.py │ │ │ │ └── s2a_head.py │ │ │ ├── pisa_retinanet_head.py │ │ │ ├── pisa_ssd_head.py │ │ │ ├── reppoints_head.py │ │ │ ├── retina_head.py │ │ │ ├── retina_sepbn_head.py │ │ │ ├── rpn_head.py │ │ │ ├── rpn_test_mixin.py │ │ │ └── ssd_head.py │ │ ├── detectors │ │ │ ├── __init__.py │ │ │ ├── atss.py │ │ │ ├── base.py │ │ │ ├── cascade_rcnn.py │ │ │ ├── fast_rcnn.py │ │ │ ├── faster_rcnn.py │ │ │ ├── fcos.py │ │ │ ├── fovea.py │ │ │ ├── fsaf.py │ │ │ ├── gfl.py │ │ │ ├── grid_rcnn.py │ │ │ ├── htc.py │ │ │ ├── mask_rcnn.py │ │ │ ├── mask_scoring_rcnn.py │ │ │ ├── nasfcos.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── faster_rcnn_obb.py │ │ │ │ ├── fcos_obb.py │ │ │ │ ├── gliding_vertex.py │ │ │ │ ├── obb_base.py │ │ │ │ ├── obb_rpn.py │ │ │ │ ├── obb_single_stage.py │ │ │ │ ├── obb_test_mixins.py │ │ │ │ ├── obb_two_stage.py │ │ │ │ ├── oriented_rcnn.py │ │ │ │ ├── retinanet_obb.py │ │ │ │ ├── roi_transformer.py │ │ │ │ └── s2anet.py │ │ │ ├── point_rend.py │ │ │ ├── reppoints_detector.py │ │ │ ├── retinanet.py │ │ │ ├── rpn.py │ │ │ ├── single_stage.py │ │ │ └── two_stage.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── ae_loss.py │ │ │ ├── balanced_l1_loss.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── focal_loss.py │ │ │ ├── gaussian_focal_loss.py │ │ │ ├── gfocal_loss.py │ │ │ ├── ghm_loss.py │ │ │ ├── iou_loss.py │ │ │ ├── mse_loss.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ └── poly_iou_loss.py │ │ │ ├── pisa_loss.py │ │ │ ├── smooth_l1_loss.py │ │ │ └── utils.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ ├── bfp.py │ │ │ ├── fpn.py │ │ │ ├── fpn_carafe.py │ │ │ ├── hrfpn.py │ │ │ ├── nas_fpn.py │ │ │ ├── nasfcos_fpn.py │ │ │ ├── pafpn.py │ │ │ └── rfp.py │ │ ├── roi_heads │ │ │ ├── __init__.py │ │ │ ├── base_roi_head.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ ├── double_bbox_head.py │ │ │ │ └── obb │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── gv_bbox_head.py │ │ │ │ │ ├── obb_convfc_bbox_head.py │ │ │ │ │ ├── obb_double_bbox_head.py │ │ │ │ │ └── obbox_head.py │ │ │ ├── cascade_roi_head.py │ │ │ ├── double_roi_head.py │ │ │ ├── dynamic_roi_head.py │ │ │ ├── grid_roi_head.py │ │ │ ├── htc_roi_head.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── coarse_mask_head.py │ │ │ │ ├── fcn_mask_head.py │ │ │ │ ├── fused_semantic_head.py │ │ │ │ ├── grid_head.py │ │ │ │ ├── htc_mask_head.py │ │ │ │ ├── mask_point_head.py │ │ │ │ └── maskiou_head.py │ │ │ ├── mask_scoring_roi_head.py │ │ │ ├── obb │ │ │ │ ├── __init__.py │ │ │ │ ├── gv_ratio_roi_head.py │ │ │ │ ├── obb_base_roi_head.py │ │ │ │ ├── obb_double_roi_head.py │ │ │ │ ├── obb_standard_roi_head.py │ │ │ │ ├── obb_test_mixins.py │ │ │ │ └── roitrans_roi_head.py │ │ │ ├── pisa_roi_head.py │ │ │ ├── point_rend_roi_head.py │ │ │ ├── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ ├── base_roi_extractor.py │ │ │ │ ├── generic_roi_extractor.py │ │ │ │ ├── obb │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── hbb_select_level_roi_extractor.py │ │ │ │ │ ├── obb_base_roi_extractor.py │ │ │ │ │ └── obb_single_level_roi_extractor.py │ │ │ │ └── single_level_roi_extractor.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── res_layer.py │ │ │ ├── standard_roi_head.py │ │ │ └── test_mixins.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── res_layer.py │ ├── ops │ │ ├── __init__.py │ │ ├── box_iou_rotated │ │ │ ├── __init__.py │ │ │ ├── 
box_iou_rotated_wrapper.py │ │ │ └── src │ │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ │ ├── box_iou_rotated_ext.cpp │ │ │ │ └── box_iou_rotated_utils.h │ │ ├── context_block.py │ │ ├── convex │ │ │ ├── __init__.py │ │ │ ├── convex_wrapper.py │ │ │ └── src │ │ │ │ ├── convex_cpu.cpp │ │ │ │ ├── convex_cuda.cu │ │ │ │ └── convex_ext.cpp │ │ ├── corner_pool │ │ │ ├── __init__.py │ │ │ ├── corner_pool.py │ │ │ └── src │ │ │ │ └── corner_pool.cpp │ │ ├── generalized_attention.py │ │ ├── masked_conv │ │ │ ├── __init__.py │ │ │ ├── masked_conv.py │ │ │ └── src │ │ │ │ ├── cuda │ │ │ │ ├── masked_conv2d_cuda.cpp │ │ │ │ └── masked_conv2d_kernel.cu │ │ │ │ └── masked_conv2d_ext.cpp │ │ ├── merge_cells.py │ │ ├── nms │ │ │ ├── __init__.py │ │ │ ├── nms_wrapper.py │ │ │ └── src │ │ │ │ ├── cpu │ │ │ │ └── nms_cpu.cpp │ │ │ │ ├── cuda │ │ │ │ ├── nms_cuda.cpp │ │ │ │ └── nms_kernel.cu │ │ │ │ └── nms_ext.cpp │ │ ├── nms_rotated │ │ │ ├── __init__.py │ │ │ ├── nms_rotated_wrapper.py │ │ │ └── src │ │ │ │ ├── box_iou_rotated_utils.h │ │ │ │ ├── nms_rotated_cpu.cpp │ │ │ │ ├── nms_rotated_cuda.cu │ │ │ │ ├── nms_rotated_ext.cpp │ │ │ │ ├── poly_nms_cpu.cpp │ │ │ │ └── poly_nms_cuda.cu │ │ ├── non_local.py │ │ ├── orn │ │ │ ├── __init__.py │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ ├── active_rotating_filter.py │ │ │ │ ├── rotation_invariant_encoding.py │ │ │ │ └── rotation_invariant_pooling.py │ │ │ ├── modules │ │ │ │ ├── ORConv.py │ │ │ │ └── __init__.py │ │ │ └── src │ │ │ │ ├── ActiveRotatingFilter.h │ │ │ │ ├── RotationInvariantEncoding.h │ │ │ │ ├── cpu │ │ │ │ ├── ActiveRotatingFilter_cpu.cpp │ │ │ │ ├── RotationInvariantEncoding_cpu.cpp │ │ │ │ └── vision.h │ │ │ │ ├── cuda │ │ │ │ ├── ActiveRotatingFilter_cuda.cu │ │ │ │ ├── RotationInvariantEncoding_cuda.cu │ │ │ │ └── vision.h │ │ │ │ └── vision.cpp │ │ ├── plugin.py │ │ ├── point_sample.py │ │ ├── roi_align │ │ │ ├── __init__.py │ │ │ ├── gradcheck.py │ │ │ ├── roi_align.py │ │ │ └── src │ │ │ │ ├── cpu │ │ │ │ └── roi_align_v2.cpp │ │ │ │ ├── cuda │ │ │ │ ├── roi_align_kernel.cu │ │ │ │ └── roi_align_kernel_v2.cu │ │ │ │ └── roi_align_ext.cpp │ │ ├── roi_align_rotated │ │ │ ├── __init__.py │ │ │ ├── roi_align_rotated.py │ │ │ └── src │ │ │ │ ├── roi_align_rotated_cpu.cpp │ │ │ │ ├── roi_align_rotated_cuda.cu │ │ │ │ ├── roi_align_rotated_ext.cpp │ │ │ │ └── temp │ │ │ │ ├── roi_align_rotated_cpu.cpp │ │ │ │ ├── roi_align_rotated_cuda.cu │ │ │ │ └── roi_align_rotated_ext.cpp │ │ ├── roi_pool │ │ │ ├── __init__.py │ │ │ ├── gradcheck.py │ │ │ ├── roi_pool.py │ │ │ └── src │ │ │ │ ├── cuda │ │ │ │ └── roi_pool_kernel.cu │ │ │ │ └── roi_pool_ext.cpp │ │ ├── sigmoid_focal_loss │ │ │ ├── __init__.py │ │ │ ├── sigmoid_focal_loss.py │ │ │ └── src │ │ │ │ ├── cuda │ │ │ │ └── sigmoid_focal_loss_cuda.cu │ │ │ │ └── sigmoid_focal_loss_ext.cpp │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── src │ │ │ │ └── compiling_info.cpp │ │ └── wrappers.py │ └── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ ├── contextmanagers.py │ │ ├── logger.py │ │ ├── profiling.py │ │ └── util_mixins.py ├── pytest.ini ├── requirements.txt ├── requirements │ ├── build.txt │ ├── docs.txt │ ├── optional.txt │ ├── readthedocs.txt │ ├── runtime.txt │ └── tests.txt ├── setup.py ├── tests │ ├── async_benchmark.py │ ├── test_anchor.py │ ├── test_assigner.py │ ├── test_async.py │ ├── test_backbone.py │ ├── test_config.py │ ├── test_dataset.py │ ├── test_forward.py │ ├── test_fp16.py │ ├── test_heads.py │ ├── test_losses.py │ ├── 
test_masks.py │ ├── test_necks.py │ ├── test_ops │ │ ├── test_corner_pool.py │ │ ├── test_merge_cells.py │ │ ├── test_nms.py │ │ ├── test_soft_nms.py │ │ └── test_wrappers.py │ ├── test_pipelines │ │ ├── test_formatting.py │ │ ├── test_loading.py │ │ ├── test_models_aug_test.py │ │ └── test_transform.py │ ├── test_pisa_heads.py │ ├── test_roi_extractor.py │ └── test_sampler.py └── tools │ ├── analyze_logs.py │ ├── benchmark.py │ ├── browse_dataset.py │ ├── coco_error_analysis.py │ ├── convert_datasets │ ├── cityscapes.py │ └── pascal_voc.py │ ├── detectron2pytorch.py │ ├── dist_test.sh │ ├── dist_train.sh │ ├── fuse_conv_bn.py │ ├── get_flops.py │ ├── print_config.py │ ├── publish_model.py │ ├── pytorch2onnx.py │ ├── regnet2mmdet.py │ ├── robustness_eval.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── test.py │ ├── test_robustness.py │ ├── train.py │ └── upgrade_model_version.py ├── README.md ├── Scene Recognition ├── README.md ├── __pycache__ │ ├── config.cpython-38.pyc │ ├── logger.cpython-38.pyc │ ├── lr_scheduler.cpython-38.pyc │ ├── optimizer.cpython-38.pyc │ └── utils.cpython-38.pyc ├── config.py ├── configs │ └── swin_tiny_patch4_window7_224.yaml ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ ├── build.cpython-38.pyc │ │ ├── cached_image_folder.cpython-38.pyc │ │ ├── samplers.cpython-38.pyc │ │ └── zipreader.cpython-38.pyc │ ├── build.py │ ├── cached_image_folder.py │ ├── map22kto1k.txt │ ├── samplers.py │ └── zipreader.py ├── logger.py ├── lr_scheduler.py ├── main.py ├── master_addr ├── models │ ├── ViTAE_Window_NoShift │ │ ├── NormalCell.py │ │ ├── ReductionCell.py │ │ ├── SELayer.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── NormalCell.cpython-38.pyc │ │ │ ├── ReductionCell.cpython-38.pyc │ │ │ ├── SELayer.cpython-38.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── base_model.cpython-38.pyc │ │ │ ├── models.cpython-38.pyc │ │ │ ├── swin.cpython-38.pyc │ │ │ ├── token_performer.cpython-38.pyc │ │ │ └── token_transformer.cpython-38.pyc │ │ ├── base_model.py │ │ ├── models.py │ │ ├── swin.py │ │ ├── token_performer.py │ │ └── token_transformer.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ ├── build.cpython-38.pyc │ │ ├── resnet.cpython-38.pyc │ │ ├── swin_mlp.cpython-38.pyc │ │ └── swin_transformer.cpython-38.pyc │ ├── build.py │ ├── resnet.py │ └── swin_transformer.py ├── optimizer.py └── utils.py ├── Semantic Segmentation ├── .circleci │ └── config.yml ├── .dev │ ├── batch_test_list.py │ ├── batch_train_list.txt │ ├── benchmark_evaluation.sh │ ├── benchmark_inference.py │ ├── benchmark_train.sh │ ├── check_urls.py │ ├── gather_benchmark_evaluation_results.py │ ├── gather_benchmark_train_results.py │ ├── gather_models.py │ ├── generate_benchmark_evaluation_script.py │ ├── generate_benchmark_train_script.py │ ├── log_collector │ │ ├── example_config.py │ │ ├── log_collector.py │ │ ├── readme.md │ │ └── utils.py │ ├── md2yml.py │ └── upload_modelzoo.py ├── .github │ ├── CODE_OF_CONDUCT.md │ ├── CONTRIBUTING.md │ ├── ISSUE_TEMPLATE │ │ ├── config.yml │ │ ├── error-report.md │ │ ├── feature_request.md │ │ ├── general_questions.md │ │ └── reimplementation_questions.md │ ├── pull_request_template.md │ └── workflows │ │ ├── build.yml │ │ ├── deploy.yml │ │ └── lint.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── LICENSE ├── MANIFEST.in ├── README.md ├── configs │ ├── _base_ │ │ ├── datasets │ │ │ ├── ade20k.py │ │ │ ├── chase_db1.py │ │ │ ├── cityscapes.py │ │ │ ├── cityscapes_1024x1024.py │ │ │ 
├── cityscapes_768x768.py │ │ │ ├── cityscapes_769x769.py │ │ │ ├── cityscapes_832x832.py │ │ │ ├── coco-stuff10k.py │ │ │ ├── coco-stuff164k.py │ │ │ ├── drive.py │ │ │ ├── hrf.py │ │ │ ├── isaid.py │ │ │ ├── loveda.py │ │ │ ├── pascal_context.py │ │ │ ├── pascal_context_59.py │ │ │ ├── pascal_voc12.py │ │ │ ├── pascal_voc12_aug.py │ │ │ ├── potsdam.py │ │ │ ├── stare.py │ │ │ └── vaihingen.py │ │ ├── default_runtime.py │ │ ├── models │ │ │ ├── ann_r50-d8.py │ │ │ ├── apcnet_r50-d8.py │ │ │ ├── bisenetv1_r18-d32.py │ │ │ ├── bisenetv2.py │ │ │ ├── ccnet_r50-d8.py │ │ │ ├── cgnet.py │ │ │ ├── danet_r50-d8.py │ │ │ ├── deeplabv3_r50-d8.py │ │ │ ├── deeplabv3_unet_s5-d16.py │ │ │ ├── deeplabv3plus_r50-d8.py │ │ │ ├── dmnet_r50-d8.py │ │ │ ├── dnl_r50-d8.py │ │ │ ├── dpt_vit-b16.py │ │ │ ├── emanet_r50-d8.py │ │ │ ├── encnet_r50-d8.py │ │ │ ├── erfnet_fcn.py │ │ │ ├── fast_scnn.py │ │ │ ├── fastfcn_r50-d32_jpu_psp.py │ │ │ ├── fcn_hr18.py │ │ │ ├── fcn_r50-d8.py │ │ │ ├── fcn_unet_s5-d16.py │ │ │ ├── fpn_our_r50.py │ │ │ ├── fpn_r50.py │ │ │ ├── fpn_swin.py │ │ │ ├── fpn_vitae.py │ │ │ ├── gcnet_r50-d8.py │ │ │ ├── icnet_r50-d8.py │ │ │ ├── isanet_r50-d8.py │ │ │ ├── lraspp_m-v3-d8.py │ │ │ ├── nonlocal_r50-d8.py │ │ │ ├── ocrnet_hr18.py │ │ │ ├── ocrnet_r50-d8.py │ │ │ ├── pointrend_r50.py │ │ │ ├── psanet_r50-d8.py │ │ │ ├── pspnet_r50-d8.py │ │ │ ├── pspnet_unet_s5-d16.py │ │ │ ├── segformer_mit-b0.py │ │ │ ├── segmenter_vit-b16_mask.py │ │ │ ├── setr_mla.py │ │ │ ├── setr_naive.py │ │ │ ├── setr_pup.py │ │ │ ├── stdc.py │ │ │ ├── twins_pcpvt-s_fpn.py │ │ │ ├── twins_pcpvt-s_upernet.py │ │ │ ├── upernet_our_r50.py │ │ │ ├── upernet_r50.py │ │ │ ├── upernet_swin.py │ │ │ ├── upernet_swin_new.py │ │ │ ├── upernet_vit-b16_ln_mln.py │ │ │ └── upernet_vitae.py │ │ └── schedules │ │ │ ├── schedule_100.py │ │ │ ├── schedule_160k.py │ │ │ ├── schedule_20k.py │ │ │ ├── schedule_320k.py │ │ │ ├── schedule_40k.py │ │ │ └── schedule_80k.py │ ├── ann │ │ ├── README.md │ │ ├── ann.yml │ │ ├── ann_r101-d8_512x1024_40k_cityscapes.py │ │ ├── ann_r101-d8_512x1024_80k_cityscapes.py │ │ ├── ann_r101-d8_512x512_160k_ade20k.py │ │ ├── ann_r101-d8_512x512_20k_voc12aug.py │ │ ├── ann_r101-d8_512x512_40k_voc12aug.py │ │ ├── ann_r101-d8_512x512_80k_ade20k.py │ │ ├── ann_r101-d8_769x769_40k_cityscapes.py │ │ ├── ann_r101-d8_769x769_80k_cityscapes.py │ │ ├── ann_r50-d8_512x1024_40k_cityscapes.py │ │ ├── ann_r50-d8_512x1024_80k_cityscapes.py │ │ ├── ann_r50-d8_512x512_160k_ade20k.py │ │ ├── ann_r50-d8_512x512_20k_voc12aug.py │ │ ├── ann_r50-d8_512x512_40k_voc12aug.py │ │ ├── ann_r50-d8_512x512_80k_ade20k.py │ │ ├── ann_r50-d8_769x769_40k_cityscapes.py │ │ └── ann_r50-d8_769x769_80k_cityscapes.py │ ├── apcnet │ │ ├── README.md │ │ ├── apcnet.yml │ │ ├── apcnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── apcnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── apcnet_r101-d8_512x512_160k_ade20k.py │ │ ├── apcnet_r101-d8_512x512_80k_ade20k.py │ │ ├── apcnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── apcnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── apcnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── apcnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── apcnet_r50-d8_512x512_160k_ade20k.py │ │ ├── apcnet_r50-d8_512x512_80k_ade20k.py │ │ ├── apcnet_r50-d8_769x769_40k_cityscapes.py │ │ └── apcnet_r50-d8_769x769_80k_cityscapes.py │ ├── bisenetv1 │ │ ├── README.md │ │ ├── bisenetv1.yml │ │ ├── bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ │ ├── bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ │ ├── 
bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py │ │ ├── bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py │ │ ├── bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py │ │ ├── bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ │ ├── bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ │ ├── bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py │ │ ├── bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py │ │ ├── bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ │ └── bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py │ ├── bisenetv2 │ │ ├── README.md │ │ ├── bisenetv2.yml │ │ ├── bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py │ │ ├── bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py │ │ ├── bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py │ │ └── bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py │ ├── ccnet │ │ ├── README.md │ │ ├── ccnet.yml │ │ ├── ccnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── ccnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── ccnet_r101-d8_512x512_160k_ade20k.py │ │ ├── ccnet_r101-d8_512x512_20k_voc12aug.py │ │ ├── ccnet_r101-d8_512x512_40k_voc12aug.py │ │ ├── ccnet_r101-d8_512x512_80k_ade20k.py │ │ ├── ccnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── ccnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── ccnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── ccnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── ccnet_r50-d8_512x512_160k_ade20k.py │ │ ├── ccnet_r50-d8_512x512_20k_voc12aug.py │ │ ├── ccnet_r50-d8_512x512_40k_voc12aug.py │ │ ├── ccnet_r50-d8_512x512_80k_ade20k.py │ │ ├── ccnet_r50-d8_769x769_40k_cityscapes.py │ │ └── ccnet_r50-d8_769x769_80k_cityscapes.py │ ├── cgnet │ │ ├── README.md │ │ ├── cgnet.yml │ │ ├── cgnet_512x1024_60k_cityscapes.py │ │ └── cgnet_680x680_60k_cityscapes.py │ ├── danet │ │ ├── README.md │ │ ├── danet.yml │ │ ├── danet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── danet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── danet_r101-d8_512x512_160k_ade20k.py │ │ ├── danet_r101-d8_512x512_20k_voc12aug.py │ │ ├── danet_r101-d8_512x512_40k_voc12aug.py │ │ ├── danet_r101-d8_512x512_80k_ade20k.py │ │ ├── danet_r101-d8_769x769_40k_cityscapes.py │ │ ├── danet_r101-d8_769x769_80k_cityscapes.py │ │ ├── danet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── danet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── danet_r50-d8_512x512_160k_ade20k.py │ │ ├── danet_r50-d8_512x512_20k_voc12aug.py │ │ ├── danet_r50-d8_512x512_40k_voc12aug.py │ │ ├── danet_r50-d8_512x512_80k_ade20k.py │ │ ├── danet_r50-d8_769x769_40k_cityscapes.py │ │ └── danet_r50-d8_769x769_80k_cityscapes.py │ ├── deeplabv3 │ │ ├── README.md │ │ ├── deeplabv3.yml │ │ ├── deeplabv3_r101-d16-mg124_512x1024_40k_cityscapes.py │ │ ├── deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r101-d8_480x480_40k_pascal_context.py │ │ ├── deeplabv3_r101-d8_480x480_40k_pascal_context_59.py │ │ ├── deeplabv3_r101-d8_480x480_80k_pascal_context.py │ │ ├── deeplabv3_r101-d8_480x480_80k_pascal_context_59.py │ │ ├── deeplabv3_r101-d8_512x1024_40k_cityscapes.py │ │ ├── deeplabv3_r101-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r101-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3_r101-d8_512x512_20k_voc12aug.py │ │ ├── deeplabv3_r101-d8_512x512_40k_voc12aug.py │ │ ├── deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py │ │ ├── deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py │ │ ├── deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py │ │ ├── deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py │ │ ├── 
deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py │ │ ├── deeplabv3_r101-d8_512x512_80k_ade20k.py │ │ ├── deeplabv3_r101-d8_769x769_40k_cityscapes.py │ │ ├── deeplabv3_r101-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r101b-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r101b-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3_r18-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r18-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3_r18b-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r18b-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3_r50-d8_480x480_40k_pascal_context.py │ │ ├── deeplabv3_r50-d8_480x480_40k_pascal_context_59.py │ │ ├── deeplabv3_r50-d8_480x480_80k_pascal_context.py │ │ ├── deeplabv3_r50-d8_480x480_80k_pascal_context_59.py │ │ ├── deeplabv3_r50-d8_512x1024_40k_cityscapes.py │ │ ├── deeplabv3_r50-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_r50-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3_r50-d8_512x512_20k_voc12aug.py │ │ ├── deeplabv3_r50-d8_512x512_40k_voc12aug.py │ │ ├── deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py │ │ ├── deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py │ │ ├── deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py │ │ ├── deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py │ │ ├── deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py │ │ ├── deeplabv3_r50-d8_512x512_80k_ade20k.py │ │ ├── deeplabv3_r50-d8_769x769_40k_cityscapes.py │ │ ├── deeplabv3_r50-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3_r50b-d8_512x1024_80k_cityscapes.py │ │ └── deeplabv3_r50b-d8_769x769_80k_cityscapes.py │ ├── deeplabv3plus │ │ ├── README.md │ │ ├── deeplabv3plus.yml │ │ ├── deeplabv3plus_r101-d16-mg124_512x1024_40k_cityscapes.py │ │ ├── deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r101-d8_480x480_40k_pascal_context.py │ │ ├── deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py │ │ ├── deeplabv3plus_r101-d8_480x480_80k_pascal_context.py │ │ ├── deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py │ │ ├── deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py │ │ ├── deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py │ │ ├── deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r101-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3plus_r101-d8_512x512_20k_voc12aug.py │ │ ├── deeplabv3plus_r101-d8_512x512_40k_voc12aug.py │ │ ├── deeplabv3plus_r101-d8_512x512_80k_ade20k.py │ │ ├── deeplabv3plus_r101-d8_512x512_80k_loveda.py │ │ ├── deeplabv3plus_r101-d8_512x512_80k_potsdam.py │ │ ├── deeplabv3plus_r101-d8_769x769_40k_cityscapes.py │ │ ├── deeplabv3plus_r101-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3plus_r18-d8_4x4_512x512_80k_vaihingen.py │ │ ├── deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r18-d8_512x512_80k_loveda.py │ │ ├── deeplabv3plus_r18-d8_512x512_80k_potsdam.py │ │ ├── deeplabv3plus_r18-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3plus_r50-d8_480x480_40k_pascal_context.py │ │ ├── deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py │ │ ├── deeplabv3plus_r50-d8_480x480_80k_pascal_context.py │ │ ├── deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py │ │ ├── deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py │ │ ├── 
deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py │ │ ├── deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_r50-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3plus_r50-d8_512x512_20k_voc12aug.py │ │ ├── deeplabv3plus_r50-d8_512x512_40k_voc12aug.py │ │ ├── deeplabv3plus_r50-d8_512x512_80k_ade20k.py │ │ ├── deeplabv3plus_r50-d8_512x512_80k_loveda.py │ │ ├── deeplabv3plus_r50-d8_512x512_80k_potsdam.py │ │ ├── deeplabv3plus_r50-d8_769x769_40k_cityscapes.py │ │ ├── deeplabv3plus_r50-d8_769x769_80k_cityscapes.py │ │ ├── deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py │ │ └── deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py │ ├── dmnet │ │ ├── README.md │ │ ├── dmnet.yml │ │ ├── dmnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── dmnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── dmnet_r101-d8_512x512_160k_ade20k.py │ │ ├── dmnet_r101-d8_512x512_80k_ade20k.py │ │ ├── dmnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── dmnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── dmnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── dmnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── dmnet_r50-d8_512x512_160k_ade20k.py │ │ ├── dmnet_r50-d8_512x512_80k_ade20k.py │ │ ├── dmnet_r50-d8_769x769_40k_cityscapes.py │ │ └── dmnet_r50-d8_769x769_80k_cityscapes.py │ ├── dnlnet │ │ ├── README.md │ │ ├── dnl_r101-d8_512x1024_40k_cityscapes.py │ │ ├── dnl_r101-d8_512x1024_80k_cityscapes.py │ │ ├── dnl_r101-d8_512x512_160k_ade20k.py │ │ ├── dnl_r101-d8_512x512_80k_ade20k.py │ │ ├── dnl_r101-d8_769x769_40k_cityscapes.py │ │ ├── dnl_r101-d8_769x769_80k_cityscapes.py │ │ ├── dnl_r50-d8_512x1024_40k_cityscapes.py │ │ ├── dnl_r50-d8_512x1024_80k_cityscapes.py │ │ ├── dnl_r50-d8_512x512_160k_ade20k.py │ │ ├── dnl_r50-d8_512x512_80k_ade20k.py │ │ ├── dnl_r50-d8_769x769_40k_cityscapes.py │ │ ├── dnl_r50-d8_769x769_80k_cityscapes.py │ │ └── dnlnet.yml │ ├── dpt │ │ ├── README.md │ │ ├── dpt.yml │ │ └── dpt_vit-b16_512x512_160k_ade20k.py │ ├── emanet │ │ ├── README.md │ │ ├── emanet.yml │ │ ├── emanet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── emanet_r101-d8_769x769_80k_cityscapes.py │ │ ├── emanet_r50-d8_512x1024_80k_cityscapes.py │ │ └── emanet_r50-d8_769x769_80k_cityscapes.py │ ├── encnet │ │ ├── README.md │ │ ├── encnet.yml │ │ ├── encnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── encnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── encnet_r101-d8_512x512_160k_ade20k.py │ │ ├── encnet_r101-d8_512x512_20k_voc12aug.py │ │ ├── encnet_r101-d8_512x512_40k_voc12aug.py │ │ ├── encnet_r101-d8_512x512_80k_ade20k.py │ │ ├── encnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── encnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── encnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── encnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── encnet_r50-d8_512x512_160k_ade20k.py │ │ ├── encnet_r50-d8_512x512_20k_voc12aug.py │ │ ├── encnet_r50-d8_512x512_40k_voc12aug.py │ │ ├── encnet_r50-d8_512x512_80k_ade20k.py │ │ ├── encnet_r50-d8_769x769_40k_cityscapes.py │ │ ├── encnet_r50-d8_769x769_80k_cityscapes.py │ │ └── encnet_r50s-d8_512x512_80k_ade20k.py │ ├── erfnet │ │ ├── README.md │ │ ├── erfnet.yml │ │ └── erfnet_fcn_4x4_512x1024_160k_cityscapes.py │ ├── fastfcn │ │ ├── README.md │ │ ├── fastfcn.yml │ │ ├── fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py │ │ ├── fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py │ │ ├── fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py │ │ ├── fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py │ │ ├── fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py │ │ ├── fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py │ │ ├── 
fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py │ │ ├── fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py │ │ ├── fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py │ │ ├── fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py │ │ ├── fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py │ │ └── fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py │ ├── fastscnn │ │ ├── README.md │ │ ├── fast_scnn_lr0.12_8x4_160k_cityscapes.py │ │ └── fastscnn.yml │ ├── fcn │ │ ├── README.md │ │ ├── fcn.yml │ │ ├── fcn_d6_r101-d16_512x1024_40k_cityscapes.py │ │ ├── fcn_d6_r101-d16_512x1024_80k_cityscapes.py │ │ ├── fcn_d6_r101-d16_769x769_40k_cityscapes.py │ │ ├── fcn_d6_r101-d16_769x769_80k_cityscapes.py │ │ ├── fcn_d6_r101b-d16_512x1024_80k_cityscapes.py │ │ ├── fcn_d6_r101b-d16_769x769_80k_cityscapes.py │ │ ├── fcn_d6_r50-d16_512x1024_40k_cityscapes.py │ │ ├── fcn_d6_r50-d16_512x1024_80k_cityscapes.py │ │ ├── fcn_d6_r50-d16_769x769_40k_cityscapes.py │ │ ├── fcn_d6_r50-d16_769x769_80k_cityscapes.py │ │ ├── fcn_d6_r50b-d16_512x1024_80k_cityscapes.py │ │ ├── fcn_d6_r50b-d16_769x769_80k_cityscapes.py │ │ ├── fcn_r101-d8_480x480_40k_pascal_context.py │ │ ├── fcn_r101-d8_480x480_40k_pascal_context_59.py │ │ ├── fcn_r101-d8_480x480_80k_pascal_context.py │ │ ├── fcn_r101-d8_480x480_80k_pascal_context_59.py │ │ ├── fcn_r101-d8_512x1024_40k_cityscapes.py │ │ ├── fcn_r101-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_r101-d8_512x512_160k_ade20k.py │ │ ├── fcn_r101-d8_512x512_20k_voc12aug.py │ │ ├── fcn_r101-d8_512x512_40k_voc12aug.py │ │ ├── fcn_r101-d8_512x512_80k_ade20k.py │ │ ├── fcn_r101-d8_769x769_40k_cityscapes.py │ │ ├── fcn_r101-d8_769x769_80k_cityscapes.py │ │ ├── fcn_r101-d8_fp16_512x1024_80k_cityscapes.py │ │ ├── fcn_r101b-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_r101b-d8_769x769_80k_cityscapes.py │ │ ├── fcn_r18-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_r18-d8_769x769_80k_cityscapes.py │ │ ├── fcn_r18b-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_r18b-d8_769x769_80k_cityscapes.py │ │ ├── fcn_r50-d8_480x480_40k_pascal_context.py │ │ ├── fcn_r50-d8_480x480_40k_pascal_context_59.py │ │ ├── fcn_r50-d8_480x480_80k_pascal_context.py │ │ ├── fcn_r50-d8_480x480_80k_pascal_context_59.py │ │ ├── fcn_r50-d8_512x1024_40k_cityscapes.py │ │ ├── fcn_r50-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_r50-d8_512x512_160k_ade20k.py │ │ ├── fcn_r50-d8_512x512_20k_voc12aug.py │ │ ├── fcn_r50-d8_512x512_40k_voc12aug.py │ │ ├── fcn_r50-d8_512x512_80k_ade20k.py │ │ ├── fcn_r50-d8_769x769_40k_cityscapes.py │ │ ├── fcn_r50-d8_769x769_80k_cityscapes.py │ │ ├── fcn_r50b-d8_512x1024_80k_cityscapes.py │ │ └── fcn_r50b-d8_769x769_80k_cityscapes.py │ ├── gcnet │ │ ├── README.md │ │ ├── gcnet.yml │ │ ├── gcnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── gcnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── gcnet_r101-d8_512x512_160k_ade20k.py │ │ ├── gcnet_r101-d8_512x512_20k_voc12aug.py │ │ ├── gcnet_r101-d8_512x512_40k_voc12aug.py │ │ ├── gcnet_r101-d8_512x512_80k_ade20k.py │ │ ├── gcnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── gcnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── gcnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── gcnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── gcnet_r50-d8_512x512_160k_ade20k.py │ │ ├── gcnet_r50-d8_512x512_20k_voc12aug.py │ │ ├── gcnet_r50-d8_512x512_40k_voc12aug.py │ │ ├── gcnet_r50-d8_512x512_80k_ade20k.py │ │ ├── gcnet_r50-d8_769x769_40k_cityscapes.py │ │ └── gcnet_r50-d8_769x769_80k_cityscapes.py │ ├── hrnet │ │ ├── README.md │ │ ├── fcn_hr18_480x480_40k_pascal_context.py │ │ ├── fcn_hr18_480x480_40k_pascal_context_59.py │ │ ├── 
fcn_hr18_480x480_80k_pascal_context.py │ │ ├── fcn_hr18_480x480_80k_pascal_context_59.py │ │ ├── fcn_hr18_4x4_512x512_80k_vaihingen.py │ │ ├── fcn_hr18_512x1024_160k_cityscapes.py │ │ ├── fcn_hr18_512x1024_40k_cityscapes.py │ │ ├── fcn_hr18_512x1024_80k_cityscapes.py │ │ ├── fcn_hr18_512x512_160k_ade20k.py │ │ ├── fcn_hr18_512x512_20k_voc12aug.py │ │ ├── fcn_hr18_512x512_40k_voc12aug.py │ │ ├── fcn_hr18_512x512_80k_ade20k.py │ │ ├── fcn_hr18_512x512_80k_loveda.py │ │ ├── fcn_hr18_512x512_80k_potsdam.py │ │ ├── fcn_hr18s_480x480_40k_pascal_context.py │ │ ├── fcn_hr18s_480x480_40k_pascal_context_59.py │ │ ├── fcn_hr18s_480x480_80k_pascal_context.py │ │ ├── fcn_hr18s_480x480_80k_pascal_context_59.py │ │ ├── fcn_hr18s_4x4_512x512_80k_vaihingen.py │ │ ├── fcn_hr18s_512x1024_160k_cityscapes.py │ │ ├── fcn_hr18s_512x1024_40k_cityscapes.py │ │ ├── fcn_hr18s_512x1024_80k_cityscapes.py │ │ ├── fcn_hr18s_512x512_160k_ade20k.py │ │ ├── fcn_hr18s_512x512_20k_voc12aug.py │ │ ├── fcn_hr18s_512x512_40k_voc12aug.py │ │ ├── fcn_hr18s_512x512_80k_ade20k.py │ │ ├── fcn_hr18s_512x512_80k_loveda.py │ │ ├── fcn_hr18s_512x512_80k_potsdam.py │ │ ├── fcn_hr48_480x480_40k_pascal_context.py │ │ ├── fcn_hr48_480x480_40k_pascal_context_59.py │ │ ├── fcn_hr48_480x480_80k_pascal_context.py │ │ ├── fcn_hr48_480x480_80k_pascal_context_59.py │ │ ├── fcn_hr48_4x4_512x512_80k_vaihingen.py │ │ ├── fcn_hr48_512x1024_160k_cityscapes.py │ │ ├── fcn_hr48_512x1024_40k_cityscapes.py │ │ ├── fcn_hr48_512x1024_80k_cityscapes.py │ │ ├── fcn_hr48_512x512_160k_ade20k.py │ │ ├── fcn_hr48_512x512_20k_voc12aug.py │ │ ├── fcn_hr48_512x512_40k_voc12aug.py │ │ ├── fcn_hr48_512x512_80k_ade20k.py │ │ ├── fcn_hr48_512x512_80k_loveda.py │ │ ├── fcn_hr48_512x512_80k_potsdam.py │ │ └── hrnet.yml │ ├── icnet │ │ ├── README.md │ │ ├── icnet.yml │ │ ├── icnet_r101-d8_832x832_160k_cityscapes.py │ │ ├── icnet_r101-d8_832x832_80k_cityscapes.py │ │ ├── icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py │ │ ├── icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py │ │ ├── icnet_r18-d8_832x832_160k_cityscapes.py │ │ ├── icnet_r18-d8_832x832_80k_cityscapes.py │ │ ├── icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py │ │ ├── icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py │ │ ├── icnet_r50-d8_832x832_160k_cityscapes.py │ │ ├── icnet_r50-d8_832x832_80k_cityscapes.py │ │ ├── icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py │ │ └── icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py │ ├── isanet │ │ ├── README.md │ │ ├── isanet.yml │ │ ├── isanet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── isanet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── isanet_r101-d8_512x512_160k_ade20k.py │ │ ├── isanet_r101-d8_512x512_20k_voc12aug.py │ │ ├── isanet_r101-d8_512x512_40k_voc12aug.py │ │ ├── isanet_r101-d8_512x512_80k_ade20k.py │ │ ├── isanet_r101-d8_769x769_40k_cityscapes.py │ │ ├── isanet_r101-d8_769x769_80k_cityscapes.py │ │ ├── isanet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── isanet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── isanet_r50-d8_512x512_160k_ade20k.py │ │ ├── isanet_r50-d8_512x512_20k_voc12aug.py │ │ ├── isanet_r50-d8_512x512_40k_voc12aug.py │ │ ├── isanet_r50-d8_512x512_80k_ade20k.py │ │ ├── isanet_r50-d8_769x769_40k_cityscapes.py │ │ └── isanet_r50-d8_769x769_80k_cityscapes.py │ ├── mobilenet_v2 │ │ ├── README.md │ │ ├── deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_m-v2-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py │ │ ├── fcn_m-v2-d8_512x1024_80k_cityscapes.py │ │ ├── 
fcn_m-v2-d8_512x512_160k_ade20k.py │ │ ├── mobilenet_v2.yml │ │ ├── pspnet_m-v2-d8_512x1024_80k_cityscapes.py │ │ └── pspnet_m-v2-d8_512x512_160k_ade20k.py │ ├── mobilenet_v3 │ │ ├── README.md │ │ ├── lraspp_m-v3-d8_512x1024_320k_cityscapes.py │ │ ├── lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py │ │ ├── lraspp_m-v3s-d8_512x1024_320k_cityscapes.py │ │ ├── lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py │ │ └── mobilenet_v3.yml │ ├── nonlocal_net │ │ ├── README.md │ │ ├── nonlocal_net.yml │ │ ├── nonlocal_r101-d8_512x1024_40k_cityscapes.py │ │ ├── nonlocal_r101-d8_512x1024_80k_cityscapes.py │ │ ├── nonlocal_r101-d8_512x512_160k_ade20k.py │ │ ├── nonlocal_r101-d8_512x512_20k_voc12aug.py │ │ ├── nonlocal_r101-d8_512x512_40k_voc12aug.py │ │ ├── nonlocal_r101-d8_512x512_80k_ade20k.py │ │ ├── nonlocal_r101-d8_769x769_40k_cityscapes.py │ │ ├── nonlocal_r101-d8_769x769_80k_cityscapes.py │ │ ├── nonlocal_r50-d8_512x1024_40k_cityscapes.py │ │ ├── nonlocal_r50-d8_512x1024_80k_cityscapes.py │ │ ├── nonlocal_r50-d8_512x512_160k_ade20k.py │ │ ├── nonlocal_r50-d8_512x512_20k_voc12aug.py │ │ ├── nonlocal_r50-d8_512x512_40k_voc12aug.py │ │ ├── nonlocal_r50-d8_512x512_80k_ade20k.py │ │ ├── nonlocal_r50-d8_769x769_40k_cityscapes.py │ │ └── nonlocal_r50-d8_769x769_80k_cityscapes.py │ ├── ocrnet │ │ ├── README.md │ │ ├── ocrnet.yml │ │ ├── ocrnet_hr18_512x1024_160k_cityscapes.py │ │ ├── ocrnet_hr18_512x1024_40k_cityscapes.py │ │ ├── ocrnet_hr18_512x1024_80k_cityscapes.py │ │ ├── ocrnet_hr18_512x512_160k_ade20k.py │ │ ├── ocrnet_hr18_512x512_20k_voc12aug.py │ │ ├── ocrnet_hr18_512x512_40k_voc12aug.py │ │ ├── ocrnet_hr18_512x512_80k_ade20k.py │ │ ├── ocrnet_hr18s_512x1024_160k_cityscapes.py │ │ ├── ocrnet_hr18s_512x1024_40k_cityscapes.py │ │ ├── ocrnet_hr18s_512x1024_80k_cityscapes.py │ │ ├── ocrnet_hr18s_512x512_160k_ade20k.py │ │ ├── ocrnet_hr18s_512x512_20k_voc12aug.py │ │ ├── ocrnet_hr18s_512x512_40k_voc12aug.py │ │ ├── ocrnet_hr18s_512x512_80k_ade20k.py │ │ ├── ocrnet_hr48_512x1024_160k_cityscapes.py │ │ ├── ocrnet_hr48_512x1024_40k_cityscapes.py │ │ ├── ocrnet_hr48_512x1024_80k_cityscapes.py │ │ ├── ocrnet_hr48_512x512_160k_ade20k.py │ │ ├── ocrnet_hr48_512x512_20k_voc12aug.py │ │ ├── ocrnet_hr48_512x512_40k_voc12aug.py │ │ ├── ocrnet_hr48_512x512_80k_ade20k.py │ │ ├── ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py │ │ ├── ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py │ │ └── ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py │ ├── point_rend │ │ ├── README.md │ │ ├── point_rend.yml │ │ ├── pointrend_r101_512x1024_80k_cityscapes.py │ │ ├── pointrend_r101_512x512_160k_ade20k.py │ │ ├── pointrend_r50_512x1024_80k_cityscapes.py │ │ └── pointrend_r50_512x512_160k_ade20k.py │ ├── psanet │ │ ├── README.md │ │ ├── psanet.yml │ │ ├── psanet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── psanet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── psanet_r101-d8_512x512_160k_ade20k.py │ │ ├── psanet_r101-d8_512x512_20k_voc12aug.py │ │ ├── psanet_r101-d8_512x512_40k_voc12aug.py │ │ ├── psanet_r101-d8_512x512_80k_ade20k.py │ │ ├── psanet_r101-d8_769x769_40k_cityscapes.py │ │ ├── psanet_r101-d8_769x769_80k_cityscapes.py │ │ ├── psanet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── psanet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── psanet_r50-d8_512x512_160k_ade20k.py │ │ ├── psanet_r50-d8_512x512_20k_voc12aug.py │ │ ├── psanet_r50-d8_512x512_40k_voc12aug.py │ │ ├── psanet_r50-d8_512x512_80k_ade20k.py │ │ ├── psanet_r50-d8_769x769_40k_cityscapes.py │ │ └── psanet_r50-d8_769x769_80k_cityscapes.py │ ├── pspnet │ │ ├── README.md │ │ ├── 
pspnet.yml │ │ ├── pspnet_r101-d8_480x480_40k_pascal_context.py │ │ ├── pspnet_r101-d8_480x480_40k_pascal_context_59.py │ │ ├── pspnet_r101-d8_480x480_80k_pascal_context.py │ │ ├── pspnet_r101-d8_480x480_80k_pascal_context_59.py │ │ ├── pspnet_r101-d8_4x4_512x512_80k_potsdam.py │ │ ├── pspnet_r101-d8_4x4_512x512_80k_vaihingen.py │ │ ├── pspnet_r101-d8_512x1024_40k_cityscapes.py │ │ ├── pspnet_r101-d8_512x1024_40k_dark.py │ │ ├── pspnet_r101-d8_512x1024_40k_night_driving.py │ │ ├── pspnet_r101-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_r101-d8_512x512_160k_ade20k.py │ │ ├── pspnet_r101-d8_512x512_20k_voc12aug.py │ │ ├── pspnet_r101-d8_512x512_40k_voc12aug.py │ │ ├── pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py │ │ ├── pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py │ │ ├── pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py │ │ ├── pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py │ │ ├── pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py │ │ ├── pspnet_r101-d8_512x512_80k_ade20k.py │ │ ├── pspnet_r101-d8_512x512_80k_loveda.py │ │ ├── pspnet_r101-d8_769x769_40k_cityscapes.py │ │ ├── pspnet_r101-d8_769x769_80k_cityscapes.py │ │ ├── pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py │ │ ├── pspnet_r101b-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_r101b-d8_512x1024_80k_dark.py │ │ ├── pspnet_r101b-d8_512x1024_80k_night_driving.py │ │ ├── pspnet_r101b-d8_769x769_80k_cityscapes.py │ │ ├── pspnet_r18-d8_4x4_512x512_80k_potsdam.py │ │ ├── pspnet_r18-d8_4x4_512x512_80k_vaihingen.py │ │ ├── pspnet_r18-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_r18-d8_512x512_80k_loveda.py │ │ ├── pspnet_r18-d8_769x769_80k_cityscapes.py │ │ ├── pspnet_r18b-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_r18b-d8_769x769_80k_cityscapes.py │ │ ├── pspnet_r50-d8_480x480_40k_pascal_context.py │ │ ├── pspnet_r50-d8_480x480_40k_pascal_context_59.py │ │ ├── pspnet_r50-d8_480x480_80k_pascal_context.py │ │ ├── pspnet_r50-d8_480x480_80k_pascal_context_59.py │ │ ├── pspnet_r50-d8_4x4_512x512_80k_potsdam.py │ │ ├── pspnet_r50-d8_4x4_512x512_80k_vaihingen.py │ │ ├── pspnet_r50-d8_512x1024_40k_cityscapes.py │ │ ├── pspnet_r50-d8_512x1024_40k_dark.py │ │ ├── pspnet_r50-d8_512x1024_40k_night_driving.py │ │ ├── pspnet_r50-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_r50-d8_512x1024_80k_dark.py │ │ ├── pspnet_r50-d8_512x1024_80k_night_driving.py │ │ ├── pspnet_r50-d8_512x512_160k_ade20k.py │ │ ├── pspnet_r50-d8_512x512_20k_voc12aug.py │ │ ├── pspnet_r50-d8_512x512_40k_voc12aug.py │ │ ├── pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py │ │ ├── pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py │ │ ├── pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py │ │ ├── pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py │ │ ├── pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py │ │ ├── pspnet_r50-d8_512x512_80k_ade20k.py │ │ ├── pspnet_r50-d8_512x512_80k_loveda.py │ │ ├── pspnet_r50-d8_769x769_40k_cityscapes.py │ │ ├── pspnet_r50-d8_769x769_80k_cityscapes.py │ │ ├── pspnet_r50b-d8_512x1024_80k_cityscapes.py │ │ └── pspnet_r50b-d8_769x769_80k_cityscapes.py │ ├── resnest │ │ ├── README.md │ │ ├── deeplabv3_s101-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3_s101-d8_512x512_160k_ade20k.py │ │ ├── deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py │ │ ├── deeplabv3plus_s101-d8_512x512_160k_ade20k.py │ │ ├── fcn_s101-d8_512x1024_80k_cityscapes.py │ │ ├── fcn_s101-d8_512x512_160k_ade20k.py │ │ ├── pspnet_s101-d8_512x1024_80k_cityscapes.py │ │ ├── pspnet_s101-d8_512x512_160k_ade20k.py │ │ └── resnest.yml │ ├── segformer │ │ ├── README.md │ │ ├── segformer.yml │ │ ├── 
segformer_mit-b0_512x512_160k_ade20k.py │ │ ├── segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py │ │ ├── segformer_mit-b1_512x512_160k_ade20k.py │ │ ├── segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py │ │ ├── segformer_mit-b2_512x512_160k_ade20k.py │ │ ├── segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py │ │ ├── segformer_mit-b3_512x512_160k_ade20k.py │ │ ├── segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py │ │ ├── segformer_mit-b4_512x512_160k_ade20k.py │ │ ├── segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py │ │ ├── segformer_mit-b5_512x512_160k_ade20k.py │ │ ├── segformer_mit-b5_640x640_160k_ade20k.py │ │ └── segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py │ ├── segmenter │ │ ├── README.md │ │ ├── segmenter.yml │ │ ├── segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py │ │ ├── segmenter_vit-l_mask_8x1_512x512_160k_ade20k.py │ │ ├── segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py │ │ ├── segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py │ │ └── segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py │ ├── sem_fpn │ │ ├── README.md │ │ ├── fpn_r101_512x1024_80k_cityscapes.py │ │ ├── fpn_r101_512x512_160k_ade20k.py │ │ ├── fpn_r50_512x1024_80k_cityscapes.py │ │ ├── fpn_r50_512x512_160k_ade20k.py │ │ ├── fpn_r50_our_imp_512x512_80k_potsdam.py │ │ ├── fpn_r50_our_imp_512x512_80k_vaihingen.py │ │ ├── fpn_r50_our_imp_896x896_80k_isaid.py │ │ ├── fpn_r50_our_rsp_512x512_80k_potsdam.py │ │ ├── fpn_r50_our_rsp_512x512_80k_vaihingen.py │ │ ├── fpn_r50_our_rsp_896x896_80k_isaid.py │ │ ├── fpn_r50_our_seco_512x512_80k_potsdam.py │ │ ├── fpn_r50_our_seco_512x512_80k_vaihingen.py │ │ ├── fpn_r50_our_seco_896x896_80k_isaid.py │ │ ├── fpn_r50_our_swin_512x512_80k_potsdam.py │ │ ├── fpn_r50_our_swin_512x512_80k_vaihingen.py │ │ ├── fpn_r50_our_swin_896x896_80k_isaid.py │ │ ├── fpn_r50_our_vitae_512x512_80k_potsdam.py │ │ ├── fpn_r50_our_vitae_512x512_80k_vaihingen.py │ │ ├── fpn_r50_our_vitae_896x896_80k_isaid.py │ │ └── sem_fpn.yml │ ├── setr │ │ ├── README.md │ │ ├── setr.yml │ │ ├── setr_mla_512x512_160k_b16_ade20k.py │ │ ├── setr_mla_512x512_160k_b8_ade20k.py │ │ ├── setr_naive_512x512_160k_b16_ade20k.py │ │ ├── setr_pup_512x512_160k_b16_ade20k.py │ │ ├── setr_vit-large_mla_8x1_768x768_80k_cityscapes.py │ │ ├── setr_vit-large_naive_8x1_768x768_80k_cityscapes.py │ │ └── setr_vit-large_pup_8x1_768x768_80k_cityscapes.py │ ├── stdc │ │ ├── README.md │ │ ├── stdc.yml │ │ ├── stdc1_512x1024_80k_cityscapes.py │ │ ├── stdc1_in1k-pre_512x1024_80k_cityscapes.py │ │ ├── stdc2_512x1024_80k_cityscapes.py │ │ └── stdc2_in1k-pre_512x1024_80k_cityscapes.py │ ├── swin │ │ ├── README.md │ │ ├── swin.yml │ │ ├── upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py │ │ ├── upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py │ │ ├── upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py │ │ ├── upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py │ │ ├── upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py │ │ ├── upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py │ │ ├── upernet_swin_tiny_patch4_window7_512x512_80k_potsdam.py │ │ ├── upernet_swin_tiny_patch4_window7_896x896_80k_isaid.py │ │ ├── upernet_swin_tiny_patch4_window7_imp_512x512_80k_potsdam.py │ │ └── upernet_swin_tiny_patch4_window7_imp_896x896_80k_isaid.py │ ├── twins │ │ ├── README.md │ │ ├── twins.yml │ │ ├── twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ ├── twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py 
│ │ ├── twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ ├── twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py │ │ ├── twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ ├── twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py │ │ ├── twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ ├── twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py │ │ ├── twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ ├── twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py │ │ ├── twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py │ │ └── twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py │ ├── unet │ │ ├── README.md │ │ ├── deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py │ │ ├── deeplabv3_unet_s5-d16_128x128_40k_stare.py │ │ ├── deeplabv3_unet_s5-d16_256x256_40k_hrf.py │ │ ├── deeplabv3_unet_s5-d16_64x64_40k_drive.py │ │ ├── deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py │ │ ├── deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py │ │ ├── deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py │ │ ├── deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py │ │ ├── fcn_unet_s5-d16_128x128_40k_chase_db1.py │ │ ├── fcn_unet_s5-d16_128x128_40k_stare.py │ │ ├── fcn_unet_s5-d16_256x256_40k_hrf.py │ │ ├── fcn_unet_s5-d16_4x4_512x1024_160k_cityscapes.py │ │ ├── fcn_unet_s5-d16_64x64_40k_drive.py │ │ ├── fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py │ │ ├── fcn_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py │ │ ├── fcn_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py │ │ ├── fcn_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py │ │ ├── pspnet_unet_s5-d16_128x128_40k_chase_db1.py │ │ ├── pspnet_unet_s5-d16_128x128_40k_stare.py │ │ ├── pspnet_unet_s5-d16_256x256_40k_hrf.py │ │ ├── pspnet_unet_s5-d16_64x64_40k_drive.py │ │ ├── pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py │ │ ├── pspnet_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_stare.py │ │ ├── pspnet_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf.py │ │ ├── pspnet_unet_s5-d16_ce-1.0-dice-3.0_64x64_40k_drive.py │ │ └── unet.yml │ ├── upernet │ │ ├── README.md │ │ ├── upernet.yml │ │ ├── upernet_our_imp_r50_512x512_80k_potsdam.py │ │ ├── upernet_our_imp_r50_896x896_80k_isaid.py │ │ ├── upernet_our_r50_512x512_80k_potsdam.py │ │ ├── upernet_our_r50_896x896_80k_isaid.py │ │ ├── upernet_r101_512x1024_40k_cityscapes.py │ │ ├── upernet_r101_512x1024_80k_cityscapes.py │ │ ├── upernet_r101_512x512_160k_ade20k.py │ │ ├── upernet_r101_512x512_20k_voc12aug.py │ │ ├── upernet_r101_512x512_40k_voc12aug.py │ │ ├── upernet_r101_512x512_80k_ade20k.py │ │ ├── upernet_r101_769x769_40k_cityscapes.py │ │ ├── upernet_r101_769x769_80k_cityscapes.py │ │ ├── upernet_r50_512x1024_40k_cityscapes.py │ │ ├── upernet_r50_512x1024_80k_cityscapes.py │ │ ├── upernet_r50_512x512_160k_ade20k.py │ │ ├── upernet_r50_512x512_20k_voc12aug.py │ │ ├── upernet_r50_512x512_40k_voc12aug.py │ │ ├── upernet_r50_512x512_80k_ade20k.py │ │ ├── upernet_r50_769x769_40k_cityscapes.py │ │ ├── upernet_r50_769x769_80k_cityscapes.py │ │ ├── upernet_seco_r50_512x512_80k_potsdam.py │ │ └── upernet_seco_r50_896x896_80k_isaid.py │ ├── vit │ │ ├── README.md │ │ ├── upernet_deit-b16_512x512_160k_ade20k.py │ │ ├── upernet_deit-b16_512x512_80k_ade20k.py │ │ ├── upernet_deit-b16_ln_mln_512x512_160k_ade20k.py │ │ ├── upernet_deit-b16_mln_512x512_160k_ade20k.py │ │ ├── upernet_deit-s16_512x512_160k_ade20k.py │ │ ├── upernet_deit-s16_512x512_80k_ade20k.py │ │ ├── upernet_deit-s16_ln_mln_512x512_160k_ade20k.py │ │ ├── upernet_deit-s16_mln_512x512_160k_ade20k.py │ │ ├── 
upernet_vit-b16_ln_mln_512x512_160k_ade20k.py │ │ ├── upernet_vit-b16_mln_512x512_160k_ade20k.py │ │ ├── upernet_vit-b16_mln_512x512_80k_ade20k.py │ │ └── vit.yml │ └── vitae_win │ │ ├── upernet_vitae_win_imp_window7_512x512_80k_potsdam.py │ │ ├── upernet_vitae_win_imp_window7_896x896_80k_isaid.py │ │ ├── upernet_vitae_win_window7_512x512_80k_potsdam_epoch100.py │ │ ├── upernet_vitae_win_window7_512x512_80k_potsdam_epoch40.py │ │ ├── upernet_vitae_win_window7_896x896_80k_isaid_epoch100.py │ │ └── upernet_vitae_win_window7_896x896_80k_isaid_epoch40.py ├── custom │ ├── __init__.py │ └── checkpoint.py ├── demo │ ├── MMSegmentation_Tutorial.ipynb │ ├── demo.png │ ├── image_demo.py │ ├── inference_demo.ipynb │ └── video_demo.py ├── docker │ ├── Dockerfile │ └── serve │ │ ├── Dockerfile │ │ ├── config.properties │ │ └── entrypoint.sh ├── docs │ ├── en │ │ ├── Makefile │ │ ├── _static │ │ │ ├── css │ │ │ │ └── readthedocs.css │ │ │ └── images │ │ │ │ └── mmsegmentation.png │ │ ├── api.rst │ │ ├── changelog.md │ │ ├── conf.py │ │ ├── dataset_prepare.md │ │ ├── get_started.md │ │ ├── index.rst │ │ ├── inference.md │ │ ├── make.bat │ │ ├── model_zoo.md │ │ ├── stat.py │ │ ├── switch_language.md │ │ ├── train.md │ │ ├── tutorials │ │ │ ├── config.md │ │ │ ├── customize_datasets.md │ │ │ ├── customize_models.md │ │ │ ├── customize_runtime.md │ │ │ ├── data_pipeline.md │ │ │ ├── index.rst │ │ │ └── training_tricks.md │ │ └── useful_tools.md │ └── zh_cn │ │ ├── Makefile │ │ ├── _static │ │ ├── css │ │ │ └── readthedocs.css │ │ └── images │ │ │ └── mmsegmentation.png │ │ ├── api.rst │ │ ├── conf.py │ │ ├── dataset_prepare.md │ │ ├── get_started.md │ │ ├── imgs │ │ ├── qq_group_qrcode.jpg │ │ ├── seggroup_qrcode.jpg │ │ └── zhihu_qrcode.jpg │ │ ├── index.rst │ │ ├── inference.md │ │ ├── make.bat │ │ ├── model_zoo.md │ │ ├── stat.py │ │ ├── switch_language.md │ │ ├── train.md │ │ ├── tutorials │ │ ├── config.md │ │ ├── customize_datasets.md │ │ ├── customize_models.md │ │ ├── customize_runtime.md │ │ ├── data_pipeline.md │ │ ├── index.rst │ │ └── training_tricks.md │ │ └── useful_tools.md ├── mmseg │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── class_names.py │ │ │ ├── eval_hooks.py │ │ │ └── metrics.py │ │ ├── seg │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ └── sampler │ │ │ │ ├── __init__.py │ │ │ │ ├── base_pixel_sampler.py │ │ │ │ └── ohem_pixel_sampler.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── misc.py │ ├── datasets │ │ ├── __init__.py │ │ ├── ade.py │ │ ├── builder.py │ │ ├── chase_db1.py │ │ ├── cityscapes.py │ │ ├── coco_stuff.py │ │ ├── custom.py │ │ ├── dark_zurich.py │ │ ├── dataset_wrappers.py │ │ ├── drive.py │ │ ├── hrf.py │ │ ├── isaid.py │ │ ├── isprs.py │ │ ├── loveda.py │ │ ├── night_driving.py │ │ ├── pascal_context.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── compose.py │ │ │ ├── formating.py │ │ │ ├── formatting.py │ │ │ ├── loading.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms.py │ │ ├── potsdam.py │ │ ├── potsdam_ori.py │ │ ├── stare.py │ │ ├── vaihingen.py │ │ └── voc.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── ViTAE_Window_NoShift │ │ │ │ ├── NormalCell.py │ │ │ │ ├── ReductionCell.py │ │ │ │ ├── SELayer.py │ │ │ │ ├── __init__.py │ │ │ │ ├── base_model.py │ │ │ │ ├── models.py │ │ │ │ ├── swin.py │ │ │ │ ├── token_performer.py │ │ │ │ └── token_transformer.py │ │ │ ├── __init__.py │ │ │ ├── 
bisenetv1.py │ │ │ ├── bisenetv2.py │ │ │ ├── cgnet.py │ │ │ ├── custom_load │ │ │ │ ├── __init__.py │ │ │ │ └── checkpoint.py │ │ │ ├── erfnet.py │ │ │ ├── fast_scnn.py │ │ │ ├── hrnet.py │ │ │ ├── icnet.py │ │ │ ├── mit.py │ │ │ ├── mobilenet_v2.py │ │ │ ├── mobilenet_v3.py │ │ │ ├── our_resnet.py │ │ │ ├── resnest.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ ├── stdc.py │ │ │ ├── swin.py │ │ │ ├── swin_transformer.py │ │ │ ├── timm_backbone.py │ │ │ ├── twins.py │ │ │ ├── unet.py │ │ │ └── vit.py │ │ ├── builder.py │ │ ├── decode_heads │ │ │ ├── __init__.py │ │ │ ├── ann_head.py │ │ │ ├── apc_head.py │ │ │ ├── aspp_head.py │ │ │ ├── cascade_decode_head.py │ │ │ ├── cc_head.py │ │ │ ├── da_head.py │ │ │ ├── decode_head.py │ │ │ ├── dm_head.py │ │ │ ├── dnl_head.py │ │ │ ├── dpt_head.py │ │ │ ├── ema_head.py │ │ │ ├── enc_head.py │ │ │ ├── fcn_head.py │ │ │ ├── fpn_head.py │ │ │ ├── gc_head.py │ │ │ ├── isa_head.py │ │ │ ├── lraspp_head.py │ │ │ ├── nl_head.py │ │ │ ├── ocr_head.py │ │ │ ├── point_head.py │ │ │ ├── psa_head.py │ │ │ ├── psp_head.py │ │ │ ├── segformer_head.py │ │ │ ├── segmenter_mask_head.py │ │ │ ├── sep_aspp_head.py │ │ │ ├── sep_fcn_head.py │ │ │ ├── setr_mla_head.py │ │ │ ├── setr_up_head.py │ │ │ ├── stdc_head.py │ │ │ └── uper_head.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── dice_loss.py │ │ │ ├── focal_loss.py │ │ │ ├── lovasz_loss.py │ │ │ └── utils.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ ├── fpn.py │ │ │ ├── ic_neck.py │ │ │ ├── jpu.py │ │ │ ├── mla_neck.py │ │ │ └── multilevel_neck.py │ │ ├── segmentors │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── cascade_encoder_decoder.py │ │ │ └── encoder_decoder.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── embed.py │ │ │ ├── inverted_residual.py │ │ │ ├── make_divisible.py │ │ │ ├── res_layer.py │ │ │ ├── se_layer.py │ │ │ ├── self_attention_block.py │ │ │ ├── shape_convert.py │ │ │ └── up_conv_block.py │ ├── ops │ │ ├── __init__.py │ │ ├── encoding.py │ │ └── wrappers.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ ├── logger.py │ │ ├── misc.py │ │ └── set_env.py │ └── version.py ├── model-index.yml ├── pytest.ini ├── requirements.txt ├── requirements │ ├── docs.txt │ ├── mminstall.txt │ ├── optional.txt │ ├── readthedocs.txt │ ├── runtime.txt │ └── tests.txt ├── resources │ ├── 3dogs.jpg │ ├── 3dogs_mask.png │ ├── mmseg-logo.png │ └── seg_demo.gif ├── setup.cfg ├── setup.py ├── tests │ ├── __init__.py │ ├── test_apis │ │ └── test_single_gpu.py │ ├── test_config.py │ ├── test_data │ │ ├── test_dataset.py │ │ ├── test_dataset_builder.py │ │ ├── test_loading.py │ │ ├── test_transform.py │ │ └── test_tta.py │ ├── test_digit_version.py │ ├── test_eval_hook.py │ ├── test_inference.py │ ├── test_metrics.py │ ├── test_models │ │ ├── __init__.py │ │ ├── test_backbones │ │ │ ├── __init__.py │ │ │ ├── test_bisenetv1.py │ │ │ ├── test_bisenetv2.py │ │ │ ├── test_blocks.py │ │ │ ├── test_cgnet.py │ │ │ ├── test_erfnet.py │ │ │ ├── test_fast_scnn.py │ │ │ ├── test_hrnet.py │ │ │ ├── test_icnet.py │ │ │ ├── test_mit.py │ │ │ ├── test_mobilenet_v3.py │ │ │ ├── test_resnest.py │ │ │ ├── test_resnet.py │ │ │ ├── test_resnext.py │ │ │ ├── test_stdc.py │ │ │ ├── test_swin.py │ │ │ ├── test_timm_backbone.py │ │ │ ├── test_twins.py │ │ │ ├── test_unet.py │ │ │ ├── test_vit.py │ │ │ └── utils.py │ │ ├── test_forward.py │ │ ├── test_heads │ │ │ ├── __init__.py │ │ │ ├── test_ann_head.py │ │ │ ├── test_apc_head.py │ │ │ ├── test_aspp_head.py │ │ │ ├── 
test_cc_head.py │ │ │ ├── test_da_head.py │ │ │ ├── test_decode_head.py │ │ │ ├── test_dm_head.py │ │ │ ├── test_dnl_head.py │ │ │ ├── test_dpt_head.py │ │ │ ├── test_ema_head.py │ │ │ ├── test_enc_head.py │ │ │ ├── test_fcn_head.py │ │ │ ├── test_gc_head.py │ │ │ ├── test_isa_head.py │ │ │ ├── test_lraspp_head.py │ │ │ ├── test_nl_head.py │ │ │ ├── test_ocr_head.py │ │ │ ├── test_point_head.py │ │ │ ├── test_psa_head.py │ │ │ ├── test_psp_head.py │ │ │ ├── test_segformer_head.py │ │ │ ├── test_segmenter_mask_head.py │ │ │ ├── test_setr_mla_head.py │ │ │ ├── test_setr_up_head.py │ │ │ ├── test_stdc_head.py │ │ │ ├── test_uper_head.py │ │ │ └── utils.py │ │ ├── test_losses │ │ │ ├── __init__.py │ │ │ ├── test_ce_loss.py │ │ │ ├── test_dice_loss.py │ │ │ ├── test_focal_loss.py │ │ │ ├── test_lovasz_loss.py │ │ │ └── test_utils.py │ │ ├── test_necks │ │ │ ├── __init__.py │ │ │ ├── test_fpn.py │ │ │ ├── test_ic_neck.py │ │ │ ├── test_jpu.py │ │ │ ├── test_mla_neck.py │ │ │ └── test_multilevel_neck.py │ │ ├── test_segmentors │ │ │ ├── __init__.py │ │ │ ├── test_cascade_encoder_decoder.py │ │ │ ├── test_encoder_decoder.py │ │ │ └── utils.py │ │ └── test_utils │ │ │ ├── __init__.py │ │ │ └── test_embed.py │ ├── test_sampler.py │ └── test_utils │ │ ├── test_misc.py │ │ └── test_set_env.py └── tools │ ├── analyze_logs.py │ ├── benchmark.py │ ├── browse_dataset.py │ ├── convert_datasets │ ├── chase_db1.py │ ├── cityscapes.py │ ├── coco_stuff10k.py │ ├── coco_stuff164k.py │ ├── drive.py │ ├── hrf.py │ ├── loveda.py │ ├── pascal_context.py │ ├── potsdam.py │ ├── stare.py │ ├── vaihingen.py │ └── voc_aug.py │ ├── convert_iSAID_mask2graymask.py │ ├── deploy_test.py │ ├── dist_test.sh │ ├── dist_train.sh │ ├── get_flops.py │ ├── model_converters │ ├── mit2mmseg.py │ ├── stdc2mmseg.py │ ├── swin2mmseg.py │ ├── twins2mmseg.py │ ├── vit2mmseg.py │ └── vitjax2mmseg.py │ ├── onnx2tensorrt.py │ ├── print_config.py │ ├── publish_model.py │ ├── pytorch2onnx.py │ ├── pytorch2torchscript.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── split_isaid.py │ ├── test.py │ ├── torchserve │ ├── mmseg2torchserve.py │ ├── mmseg_handler.py │ └── test_torchserve.py │ └── train.py └── highlycited.png /Change Detection/models/ViTAE_Window_NoShift/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd. 2 | # 3 | # This source code is licensed under the Clear BSD License 4 | # LICENSE file in the root directory of this file 5 | # All rights reserved. 
6 | from .models import * 7 | -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Change 
Detection/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/models.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/models.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/models.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/models.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-37.pyc -------------------------------------------------------------------------------- /Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/__pycache__/Models.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/__pycache__/Models.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/__pycache__/networks.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/__pycache__/networks.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/__pycache__/siamunet_dif.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/__pycache__/siamunet_dif.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/models/__pycache__/swin_transformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/models/__pycache__/swin_transformer.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/dataloaders.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/dataloaders.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/helpers.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/helpers.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/losses.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/losses.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/metrics.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/metrics.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/parser.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/parser.cpython-38.pyc -------------------------------------------------------------------------------- /Change Detection/utils/__pycache__/transforms.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Change Detection/utils/__pycache__/transforms.cpython-38.pyc -------------------------------------------------------------------------------- /Figs/aerialscene.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Figs/aerialscene.png -------------------------------------------------------------------------------- /Figs/cd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Figs/cd.png -------------------------------------------------------------------------------- /Figs/det.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Figs/det.png -------------------------------------------------------------------------------- /Figs/seg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Figs/seg.png -------------------------------------------------------------------------------- /Figs/video.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Figs/video.png -------------------------------------------------------------------------------- /Object Detection/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /Object Detection/.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /Object Detection/.gitmodules: 
-------------------------------------------------------------------------------- 1 | [submodule "BboxToolkit"] 2 | path = BboxToolkit 3 | url = https://github.com/jbwang1997/BboxToolkit.git 4 | -------------------------------------------------------------------------------- /Object Detection/.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | python: 4 | version: 3.7 5 | install: 6 | - requirements: requirements/docs.txt 7 | - requirements: requirements/readthedocs.txt 8 | -------------------------------------------------------------------------------- /Object Detection/.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | BASED_ON_STYLE = pep8 3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 5 | -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/BboxToolkit/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .mean_ap import eval_map 2 | from .recall import eval_recalls 3 | -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/BboxToolkit/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .colors import list_named_colors, single_color_val, colors_val, random_colors 2 | from .draw import draw_hbb, draw_obb, draw_poly 3 | from .show import plt_init, get_img_from_fig, imshow_bboxes 4 | -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/definition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/BboxToolkit/definition.png -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/tools/vis_configs/dior/short_names.txt: -------------------------------------------------------------------------------- 1 | APL 2 | APO 3 | BF 4 | BC 5 | BR 6 | CH 7 | ESA 8 | ETA 9 | DAM 10 | GF 11 | GTF 12 | HB 13 | OP 14 | SH 15 | STA 16 | STO 17 | TC 18 | TS 19 | VE 20 | WM 21 | -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/tools/vis_configs/dota1_0/short_names.txt: -------------------------------------------------------------------------------- 1 | LV 2 | SP 3 | HE 4 | BR 5 | PL 6 | SH 7 | SBF 8 | BC 9 | GTF 10 | SV 11 | BD 12 | TC 13 | RA 14 | ST 15 | HA 16 | -------------------------------------------------------------------------------- /Object Detection/BboxToolkit/tools/vis_configs/hrsc/short_names.txt: -------------------------------------------------------------------------------- 1 | S 2 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/cascade_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_rcnn_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | total_epochs = 20 5 | -------------------------------------------------------------------------------- /Object 
Detection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', 
deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deformable_groups=4, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), 5 | stage_with_dcn=(False, True, True, True))) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/detectors/htc_r50_sac_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | backbone=dict( 5 | type='DetectoRS_ResNet', 6 | conv_cfg=dict(type='ConvAWS'), 7 | sac=dict(type='SAC', use_deform=True), 8 | stage_with_sac=(False, True, True, True))) 9 | -------------------------------------------------------------------------------- /Object Detection/configs/fast_rcnn/README.md: -------------------------------------------------------------------------------- 1 | # Fast R-CNN 2 | 3 | ## Introduction 4 | ``` 5 | @inproceedings{girshick2015fast, 6 | title={Fast r-cnn}, 7 | author={Girshick, Ross}, 8 | booktitle={Proceedings of the IEEE international conference on computer vision}, 9 | year={2015} 10 | } 11 | ``` 12 | 13 | ## Results and models 14 | -------------------------------------------------------------------------------- /Object Detection/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'./fast_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fast_rcnn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | classes = ('person', ) 3 | data = dict( 4 | train=dict(classes=classes), 5 | val=dict(classes=classes), 6 | test=dict(classes=classes)) 7 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | 
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | roi_head=dict( 4 | bbox_head=dict( 5 | reg_decoded_bbox=True, 6 | loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) 7 | -------------------------------------------------------------------------------- /Object Detection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py' 2 | train_cfg = dict(rcnn=dict(sampler=dict(type='OHEMSampler'))) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py'] 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_2x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /Object Detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 
4 | -------------------------------------------------------------------------------- /Object Detection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/fsaf/fsaf_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fsaf_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' 2 | model = dict( 3 | backbone=dict( 4 | norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_ghm_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' 2 | model = dict( 3 | 
pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[20, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron/resnet101_gn', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /Object Detection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | -------------------------------------------------------------------------------- /Object Detection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[28, 34]) 5 | total_epochs = 36 6 | 
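Editor's note: the `lr_config = dict(step=[...])` / `total_epochs` pairs repeated in the configs above are the standard mmdetection way of stretching a schedule (a "2x" run is 24 epochs with drops at 16/22, a "3x" run is 36 epochs with drops at 28/34). As a hedged sketch of what that override means, assuming the default step policy with `gamma=0.1` defined in the `_base_` schedules:

```python
# Editorial sketch, not a repo file: learning-rate scale implied by a step
# lr_config, assuming the default gamma=0.1 of the step policy.
def step_lr_multiplier(epoch, steps, gamma=0.1):
    """Scale factor applied to the base LR at a given (0-indexed) epoch."""
    passed = sum(1 for s in steps if epoch >= s)
    return gamma ** passed

# "3x" schedule from the config above: step=[28, 34], total_epochs = 36
for epoch in (0, 27, 28, 33, 34, 35):
    print(epoch, step_lr_multiplier(epoch, steps=[28, 34]))
# 1.0 until epoch 28, then 0.1, then 0.01 from epoch 34 on
```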
-------------------------------------------------------------------------------- /Object Detection/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' 2 | # model settings 3 | model = dict( 4 | pretrained='open-mmlab://detectron2/resnet101_caffe', 5 | backbone=dict(depth=101)) 6 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | 
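Editor's note: nearly every file in this tree is a thin override on top of a `_base_` config, as in the HRNet 2x variants above. To see the fully merged result, mmcv's config loader resolves the inheritance chain; a minimal sketch (editorial addition; the chosen file path is only an example, and it assumes `mmcv-full` from requirements/runtime.txt is installed):

```python
# Editorial sketch, not a repo file: inspect a merged mmdetection config.
from mmcv import Config

cfg = Config.fromfile(
    'Object Detection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py')

# The file itself only sets the schedule; model, dataset and runtime
# settings are inherited from its _base_ chain.
print(cfg.total_epochs)       # 24
print(cfg.lr_config['step'])  # [16, 22]
print(cfg.pretty_text[:300])  # dump of the fully merged config
```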
-------------------------------------------------------------------------------- /Object Detection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_hrnetv2p_w40_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[24, 27]) 4 | total_epochs = 28 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/htc/htc_r101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | # learning policy 4 | lr_config = dict(step=[16, 19]) 5 | total_epochs = 20 6 | -------------------------------------------------------------------------------- /Object Detection/configs/htc/htc_r50_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | total_epochs = 20 5 | -------------------------------------------------------------------------------- /Object Detection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | 3 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 4 | -------------------------------------------------------------------------------- /Object Detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/coco_instance.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Object Detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/faster_rcnn_obb/faster_rcnn_obb_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_obb_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/faster_rcnn_obb/faster_rcnn_obb_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_obb_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/gliding_vertex/gliding_vertex_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './gliding_vertex_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/gliding_vertex/gliding_vertex_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './gliding_vertex_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_1x_ms_rr_dota10.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_1x_ms_rr_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/oriented_rcnn/faster_rcnn_orpn_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_orpn_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/oriented_rcnn/illustration.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/configs/obb/oriented_rcnn/illustration.jpg -------------------------------------------------------------------------------- /Object Detection/configs/obb/oriented_rcnn/orpn_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './orpn_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/retinanet_obb/retinanet_obb_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/retinanet_obb/retinanet_obb_r101_fpn_2x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r101_fpn_1x_dota10.py' 2 | 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/retinanet_obb/retinanet_obb_r101_fpn_3x_hrsc.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/retinanet_obb/retinanet_obb_r50_fpn_2x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_obb_r50_fpn_1x_dota10.py' 2 | 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/roi_transformer/faster_rcnn_roitrans_r101_fpn_1x_dota10.py: -------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_roitrans_r50_fpn_1x_dota10.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/obb/roi_transformer/faster_rcnn_roitrans_r101_fpn_3x_hrsc.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './faster_rcnn_roitrans_r50_fpn_3x_hrsc.py' 2 | 3 | # model 4 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' 2 | 3 | model = dict( 4 | neck=dict( 5 | type='PAFPN', 6 | in_channels=[256, 512, 1024, 2048], 7 | out_channels=256, 8 | num_outs=5)) 9 | -------------------------------------------------------------------------------- /Object Detection/configs/pisa/pisa_ssd300_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd300_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /Object Detection/configs/pisa/pisa_ssd512_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../ssd/ssd512_coco.py' 2 | 3 | model = dict(bbox_head=dict(type='PISASSDHead')) 4 | 5 | train_cfg = dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)) 6 | 7 | optimizer_config = dict( 8 | _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) 9 | -------------------------------------------------------------------------------- /Object Detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/configs/reppoints/reppoints.png -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='minmax')) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 
3 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_1x_coco.py' 2 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) 3 | model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) 4 | optimizer = dict(lr=0.01) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | lr_config = dict(step=[16, 22]) 3 | total_epochs = 24 4 | -------------------------------------------------------------------------------- /Object Detection/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' 2 | model = dict(bbox_head=dict(transform_method='partial_minmax')) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/res2net/htc_r2_101_fpn_20e_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../htc/htc_r50_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | # learning policy 6 | lr_config = dict(step=[16, 19]) 7 | total_epochs = 20 8 | -------------------------------------------------------------------------------- /Object Detection/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://res2net101_v1d_26w_4s', 4 | backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26)) 5 | 
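Editor's note: the Res2Net configs above leave the detector definition untouched and only swap the backbone (pretrained weights, type, depth, scales, base_width). A hedged sketch of turning such a merged config into a model object, assuming the mmdet 2.x API vendored in this repo (where train_cfg/test_cfg still sit at the top level of the config):

```python
# Editorial sketch, not a repo file: build a detector whose backbone was
# swapped to Res2Net purely through config overrides.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile(
    'Object Detection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py')

# In this mmdet generation, train_cfg/test_cfg are separate top-level keys.
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
print(type(model).__name__)               # FasterRCNN
print(model.backbone.__class__.__name__)  # Res2Net
```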
-------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 23]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[28, 34]) 4 | total_epochs = 36 5 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r50_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/retinanet_r50_fpn.py', 3 | '../_base_/datasets/coco_detection.py', 4 | '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | # optimizer 7 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) 8 | -------------------------------------------------------------------------------- /Object Detection/configs/retinanet/retinanet_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './retinanet_r50_fpn_1x_coco.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 22]) 4 | total_epochs = 24 5 | -------------------------------------------------------------------------------- /Object Detection/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_caffe_fpn_1x_coco.py' 2 | model = dict( 3 | pretrained='open-mmlab://detectron2/resnet101_caffe', 4 | backbone=dict(depth=101)) 5 | -------------------------------------------------------------------------------- /Object Detection/configs/rpn/rpn_r101_fpn_1x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | 
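Editor's note: `retinanet_r50_fpn_1x_coco.py` above is one of the few configs that also overrides the optimizer after composing its `_base_` parts. New experiments in this tree typically follow the same recipe, inheriting a config and changing only what differs. A purely illustrative sketch (hypothetical file name and values, not part of the repo):

```python
# Hypothetical derived config (editorial example only): keep the 1x RetinaNet
# setup but halve the learning rate and clip gradients, mirroring the
# override style used throughout configs/.
_base_ = './retinanet_r50_fpn_1x_coco.py'

optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
```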
-------------------------------------------------------------------------------- /Object Detection/configs/rpn/rpn_r101_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_2x_coco.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Object Detection/configs/rpn/rpn_r50_fpn_2x_coco.py: -------------------------------------------------------------------------------- 1 | _base_ = './rpn_r50_fpn_1x_coco.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 22]) 5 | total_epochs = 24 6 | -------------------------------------------------------------------------------- /Object Detection/demo/coco_test_12510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/coco_test_12510.jpg -------------------------------------------------------------------------------- /Object Detection/demo/corruptions_sev_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/corruptions_sev_3.png -------------------------------------------------------------------------------- /Object Detection/demo/data_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/data_pipeline.png -------------------------------------------------------------------------------- /Object Detection/demo/demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/demo.jpg -------------------------------------------------------------------------------- /Object Detection/demo/dota_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/dota_demo.png -------------------------------------------------------------------------------- /Object Detection/demo/loss_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/loss_curve.png -------------------------------------------------------------------------------- /Object Detection/demo/obbdet_show.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/demo/obbdet_show.jpg -------------------------------------------------------------------------------- /Object Detection/mmcv_custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | 5 | __all__ = ['load_checkpoint'] 6 | -------------------------------------------------------------------------------- /Object Detection/mmdet/VERSION: 
-------------------------------------------------------------------------------- 1 | 2.2.0 2 | -------------------------------------------------------------------------------- /Object Detection/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__, short_version 2 | 3 | __all__ = ['__version__', 'short_version'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/apis/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/apis/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | ANCHOR_GENERATORS = Registry('Anchor generator') 4 | 5 | 6 | def build_anchor_generator(cfg, default_args=None): 7 | return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args) 8 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/anchor/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/core/anchor/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/bbox/coder/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/core/bbox/coder/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | IOU_CALCULATORS = Registry('IoU calculator') 4 | 5 | 6 | def build_iou_calculator(cfg, default_args=None): 7 | """Builder of IoU calculator""" 8 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 9 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/bbox/iou_calculators/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/core/bbox/iou_calculators/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/bbox/samplers/obb/__init__.py: -------------------------------------------------------------------------------- 1 | from .obb_sampling_result import OBBSamplingResult 2 | from .obb_base_sampler import OBBBaseSampler 3 | from .obb_random_sampler import OBBRandomSampler 4 | from .obb_ohem_sampler import OBBOHEMSampler 5 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/evaluation/obb/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/core/evaluation/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/fp16/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import auto_fp16, force_fp32 2 | from .hooks import Fp16OptimizerHook, wrap_fp16_model 3 | 4 | __all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] 5 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .random_fp import RandomFPHook 2 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/mask/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/core/mask/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/core/post_processing/obb/__init__.py: -------------------------------------------------------------------------------- 1 | from .obb_nms import multiclass_arb_nms 2 | from .obb_merge_augs import (merge_rotate_aug_proposals, merge_rotate_aug_hbb, 3 | merge_rotate_aug_obb, merge_rotate_aug_poly, 4 | merge_rotate_aug_arb) 5 | -------------------------------------------------------------------------------- /Object Detection/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads 2 | from .misc import multi_apply, tensor2imgs, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'multi_apply', 6 | 'unmap' 7 | ] 8 | -------------------------------------------------------------------------------- /Object Detection/mmdet/datasets/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/datasets/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/datasets/pipelines/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/datasets/pipelines/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .distributed_sampler import DistributedSampler 2 | from .group_sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler'] 5 | -------------------------------------------------------------------------------- /Object Detection/mmdet/models/backbones/ViTAE_Window_NoShift/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd. 
2 | # 3 | # This source code is licensed under the Clear BSD License 4 | # LICENSE file in the root directory of this file 5 | # All rights reserved. 6 | from .models import * 7 | -------------------------------------------------------------------------------- /Object Detection/mmdet/models/dense_heads/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/dense_heads/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/models/detectors/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/detectors/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/models/losses/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/losses/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/models/roi_heads/bbox_heads/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/roi_heads/bbox_heads/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/models/roi_heads/obb/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Object Detection/mmdet/models/roi_heads/roi_extractors/obb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Object Detection/mmdet/models/roi_heads/roi_extractors/obb/__init__.py -------------------------------------------------------------------------------- /Object Detection/mmdet/models/roi_heads/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/box_iou_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .box_iou_rotated_wrapper import obb_overlaps 2 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/convex/__init__.py: -------------------------------------------------------------------------------- 1 | from .convex_wrapper import convex_sort 2 | 3 | __all = ['convex_sort'] 4 | -------------------------------------------------------------------------------- 
/Object Detection/mmdet/ops/corner_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .corner_pool import CornerPool 2 | 3 | __all__ = ['CornerPool'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/masked_conv/__init__.py: -------------------------------------------------------------------------------- 1 | from .masked_conv import MaskedConv2d, masked_conv2d 2 | 3 | __all__ = ['masked_conv2d', 'MaskedConv2d'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_wrapper import batched_nms, nms, nms_match, soft_nms 2 | 3 | __all__ = ['nms', 'soft_nms', 'batched_nms', 'nms_match'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/nms_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .nms_rotated_wrapper import obb_nms, poly_nms, BT_nms, arb_batched_nms 2 | 3 | __all__ = ['obb_nms', 'poly_nms', 'BT_nms', 'arb_batched_nms'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/nms_rotated/src/poly_nms_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | template 4 | at::Tensor poly_nms_cpu_kernel(const at::Tensor& dets, const float threshold) { 5 | 6 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/orn/__init__.py: -------------------------------------------------------------------------------- 1 | from .modules.ORConv import ORConv2d 2 | from .functions import rotation_invariant_encoding,RotationInvariantPooling 3 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/orn/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .ORConv import ORConv2d 2 | #from .ORConv_v2 import ORConv2d_v2 3 | 4 | #__all__ = ['ORConv2d', 'ORConv2d_v2'] 5 | __all__ = ['ORConv2d'] 6 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/roi_align/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align import RoIAlign, roi_align 2 | 3 | __all__ = ['roi_align', 'RoIAlign'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/roi_align_rotated/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_align_rotated import RoIAlignRotated, roi_align_rotated 2 | 3 | __all__ = ['roi_align_rotated', 'RoIAlignRotated'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/roi_pool/__init__.py: -------------------------------------------------------------------------------- 1 | from .roi_pool import RoIPool, roi_pool 2 | 3 | __all__ = ['roi_pool', 'RoIPool'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/ops/sigmoid_focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .sigmoid_focal_loss import SigmoidFocalLoss, 
sigmoid_focal_loss 2 | 3 | __all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss'] 4 | -------------------------------------------------------------------------------- /Object Detection/mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger 3 | 4 | __all__ = ['get_root_logger', 'collect_env'] 5 | -------------------------------------------------------------------------------- /Object Detection/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /Object Detection/requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/build.txt 2 | -r requirements/optional.txt 3 | -r requirements/runtime.txt 4 | -r requirements/tests.txt 5 | -------------------------------------------------------------------------------- /Object Detection/requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | numpy 3 | torch>=1.3 4 | -------------------------------------------------------------------------------- /Object Detection/requirements/docs.txt: -------------------------------------------------------------------------------- 1 | recommonmark 2 | sphinx 3 | sphinx_markdown_tables 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /Object Detection/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | cityscapesscripts 3 | imagecorruptions 4 | -------------------------------------------------------------------------------- /Object Detection/requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | torch 3 | torchvision 4 | -------------------------------------------------------------------------------- /Object Detection/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mmcv-full>=1.3 3 | numpy 4 | # need older pillow until torchvision is fixed 5 | Pillow<=6.2.2 6 | six 7 | terminaltables 8 | torch>=1.3 9 | torchvision 10 | -------------------------------------------------------------------------------- /Object Detection/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | interrogate 5 | isort 6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
7 | kwarray 8 | pytest 9 | pytest-cov 10 | pytest-runner 11 | ubelt 12 | xdoctest >= 0.10.0 13 | yapf 14 | -------------------------------------------------------------------------------- /Object Detection/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29500} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /Scene Recognition/__pycache__/config.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/__pycache__/config.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/__pycache__/logger.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/__pycache__/logger.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/__pycache__/lr_scheduler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/__pycache__/lr_scheduler.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/__pycache__/optimizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/__pycache__/optimizer.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/configs/swin_tiny_patch4_window7_224.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: swin 3 | NAME: swin_tiny_patch4_window7_224 4 | DROP_PATH_RATE: 0.2 5 | SWIN: 6 | EMBED_DIM: 96 7 | DEPTHS: [ 2, 2, 6, 2 ] 8 | NUM_HEADS: [ 3, 6, 12, 24 ] 9 | WINDOW_SIZE: 7 -------------------------------------------------------------------------------- /Scene Recognition/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .build import build_loader -------------------------------------------------------------------------------- /Scene Recognition/data/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/data/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Scene 
Recognition/data/__pycache__/build.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/data/__pycache__/build.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/data/__pycache__/cached_image_folder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/data/__pycache__/cached_image_folder.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/data/__pycache__/samplers.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/data/__pycache__/samplers.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/data/__pycache__/zipreader.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/data/__pycache__/zipreader.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/master_addr: -------------------------------------------------------------------------------- 1 | 11.71.1.232 2 | 11.71.1.232 3 | 11.71.1.232 4 | -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd. 2 | # 3 | # This source code is licensed under the Clear BSD License 4 | # LICENSE file in the root directory of this file 5 | # All rights reserved. 
6 | from .models import * 7 | -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/NormalCell.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/ReductionCell.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/SELayer.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/base_model.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/models.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/models.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/swin.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/token_performer.cpython-38.pyc -------------------------------------------------------------------------------- /Scene 
Recognition/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/ViTAE_Window_NoShift/__pycache__/token_transformer.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .build import build_model -------------------------------------------------------------------------------- /Scene Recognition/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/__pycache__/build.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/__pycache__/build.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/__pycache__/swin_mlp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/__pycache__/swin_mlp.cpython-38.pyc -------------------------------------------------------------------------------- /Scene Recognition/models/__pycache__/swin_transformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Scene Recognition/models/__pycache__/swin_transformer.cpython-38.pyc -------------------------------------------------------------------------------- /Semantic Segmentation/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | 3 | contact_links: 4 | - name: MMSegmentation Documentation 5 | url: https://mmsegmentation.readthedocs.io 6 | about: Check the docs and FAQ to see if your question is already answered.
7 | -------------------------------------------------------------------------------- /Semantic Segmentation/.github/ISSUE_TEMPLATE/general_questions.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General questions 3 | about: Ask general questions to get help 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /Semantic Segmentation/.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | formats: all 4 | 5 | python: 6 | version: 3.7 7 | install: 8 | - requirements: requirements/docs.txt 9 | - requirements: requirements/readthedocs.txt 10 | -------------------------------------------------------------------------------- /Semantic Segmentation/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements/*.txt 2 | include mmseg/.mim/model-index.yml 3 | recursive-include mmseg/.mim/configs *.py *.yml 4 | recursive-include mmseg/.mim/tools *.py *.sh 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ann_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './apcnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), 
auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py' 2 | data = dict( 3 | samples_per_gpu=8, 4 | workers_per_gpu=8, 5 | ) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py' 2 | # fp16 settings 3 | optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 4 | # fp16 placeholder 5 | fp16 = dict() 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './ccnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './danet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/danet/danet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_480x480_40k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_40k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_480x480_40k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_160k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_4x4_160k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_20k_coco-stuff10k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_4x4_20k_coco-stuff10k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_4x4_320k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_40k_coco-stuff10k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_4x4_40k_coco-stuff10k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_4x4_80k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_4x4_80k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | 
-------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101-d8_fp16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r101-d8_512x1024_80k_cityscapes.py' 2 | # fp16 settings 3 | optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 4 | # fp16 placeholder 5 | fp16 = dict() 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r101b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3/deeplabv3_r50b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_480x480_40k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_4x4_512x512_80k_vaihingen.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | 
-------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_loveda.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_80k_loveda.py' 2 | model = dict( 3 | backbone=dict( 4 | depth=101, 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_potsdam.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x512_80k_potsdam.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' 2 | # fp16 settings 3 | optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 
4 | # fp16 placeholder 5 | fp16 = dict() 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/deeplabv3plus_r50-d8.py', 3 | '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', 4 | '../_base_/schedules/schedule_40k.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/deeplabv3plus_r50-d8.py', 3 | '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', 4 | '../_base_/schedules/schedule_80k.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dmnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', 
backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './dnl_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/dnl_r50-d8.py', 
'../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './emanet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './emanet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/emanet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './encnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/erfnet_fcn.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | data = dict( 6 | samples_per_gpu=4, 7 | workers_per_gpu=4, 8 | ) 9 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = './fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py' 3 | data = dict( 4 | samples_per_gpu=4, 5 | workers_per_gpu=4, 6 | ) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = 
'./fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py' 3 | data = dict( 4 | samples_per_gpu=4, 5 | workers_per_gpu=4, 6 | ) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fastfcn_r50-d32_jpu_psp.py', 3 | '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py', 4 | '../_base_/schedules/schedule_80k.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101-d16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101-d16_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101-d16_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101b-d16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50b-d16_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50b-d16_769x769_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r50b-d16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_480x480_40k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_480x480_40k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_480x480_80k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_480x480_80k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'./fcn_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101-d8_fp16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r101-d8_512x1024_80k_cityscapes.py' 2 | # fp16 settings 3 | optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 4 | # fp16 placeholder 5 | fp16 = dict() 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 
2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/fcn/fcn_r50b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = 
'./gcnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './gcnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/gcnet/gcnet_r50-d8_512x512_80k_ade20k.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/vaihingen.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=6)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=150)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=21)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=21)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=150)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_80k_loveda.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/loveda.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=7)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/hrnet/fcn_hr18_512x512_80k_potsdam.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fcn_hr18.py', '../_base_/datasets/potsdam.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=6)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' 2 | model = dict(backbone=dict(backbone_cfg=dict(depth=101))) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' 2 | model = dict(backbone=dict(backbone_cfg=dict(depth=101))) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' 2 | model = dict( 3 | backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) 4 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' 2 | model = dict( 3 | backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18))) 4 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/icnet_r50-d8.py', 3 | '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', 4 | '../_base_/schedules/schedule_160k.py' 5 | ] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/icnet_r50-d8.py', 3 | '../_base_/datasets/cityscapes_832x832.py', '../_base_/default_runtime.py', 4 | '../_base_/schedules/schedule_80k.py' 
5 | ] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_160k_cityscapes.py' 2 | model = dict( 3 | backbone=dict( 4 | backbone_cfg=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './icnet_r50-d8_832x832_80k_cityscapes.py' 2 | model = dict( 3 | backbone=dict( 4 | backbone_cfg=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_769x769_40k_cityscapes.py' 2 | model = 
dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './isanet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './nonlocal_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ocrnet_hr18.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | 
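Note on the config pattern used throughout this section: most of the *_r101* files above are two-line configs that point `_base_` at the corresponding r50 file and override only the pretrained checkpoint and the backbone depth; every other setting (decode head, dataset, schedule, runtime) is inherited from the base file. The ocrnet_r101 config that follows applies the same backbone override on top of a composed `_base_` list. The sketch below illustrates that override behaviour; it is an illustration only, not the actual mmcv Config implementation, and the base values shown (ResNetV1c, depth 50) are assumptions made for the example.

def merge(base: dict, override: dict) -> dict:
    """Recursively merge `override` into `base`, dict by dict, the way config inheritance behaves."""
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)   # nested dicts are merged, not replaced
        else:
            out[key] = value                    # scalars and new keys are overwritten or added
    return out

# Values a base file such as deeplabv3plus_r50-d8 might define (assumed here for illustration).
base_model = dict(pretrained='open-mmlab://resnet50_v1c',
                  backbone=dict(type='ResNetV1c', depth=50))

# The override carried by the r101 variants in this section.
r101_override = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

print(merge(base_model, r101_override))
# -> {'pretrained': 'open-mmlab://resnet101_v1c', 'backbone': {'type': 'ResNetV1c', 'depth': 101}}

Recording only the delta in each derived file is what lets one directory cover many backbone, schedule, and dataset combinations without duplicating the full model definition.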
-------------------------------------------------------------------------------- /Semantic Segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pointrend_r50_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/point_rend/pointrend_r101_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pointrend_r50_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/pointrend_r50.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | lr_config = dict(warmup='linear', warmup_iters=200) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ 
= './psanet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './psanet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_480x480_40k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_480x480_40k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_480x480_80k_pascal_context.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_480x480_80k_pascal_context_59.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_potsdam.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_4x4_512x512_80k_potsdam.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_4x4_512x512_80k_vaihingen.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_4x4_512x512_80k_vaihingen.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_dark.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_40k_dark.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x1024_40k_night_driving.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_40k_night_driving.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py: 
-------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_160k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_20k_coco-stuff10k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_320k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_40k_coco-stuff10k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_4x4_80k_coco-stuff164k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_loveda.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x512_80k_loveda.py' 2 | model = dict( 3 | backbone=dict( 4 | depth=101, 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101-d8_fp16_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r101-d8_512x1024_80k_cityscapes.py' 2 | # fp16 settings 3 | optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.) 4 | # fp16 placeholder 5 | fp16 = dict() 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_dark.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_80k_dark.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_night_driving.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_80k_night_driving.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict( 3 | pretrained='torchvision://resnet101', 4 | backbone=dict(type='ResNet', depth=101)) 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py', 3 | 
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/loveda.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=7), auxiliary_head=dict(num_classes=7)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './fpn_r50_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './fpn_r50_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fpn_r50.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/fpn_r50.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict(decode_head=dict(num_classes=150)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/setr/setr_mla_512x512_160k_b16_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./setr_mla_512x512_160k_b8_ade20k.py'] 2 | 3 | # num_gpus: 8 -> batch_size: 16 4 | data = dict(samples_per_gpu=2) 5 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './stdc1_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | backbone=dict( 4 | backbone_cfg=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='./pretrained/stdc1.pth')))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/stdc/stdc2_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './stdc1_512x1024_80k_cityscapes.py' 2 | model = dict(backbone=dict(backbone_cfg=dict(stdc_type='STDCNet2'))) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './stdc2_512x1024_80k_cityscapes.py' 2 | model = dict( 3 | backbone=dict( 4 | backbone_cfg=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='./pretrained/stdc2.pth')))) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | './upernet_swin_base_patch4_window12_512x512_160k_ade20k_' 3 | 'pretrain_384x384_1K.py' 4 | ] 5 | model = dict(pretrained='pretrain/swin_base_patch4_window12_384_22k.pth') 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | './upernet_swin_base_patch4_window7_512x512_160k_ade20k_' 3 | 'pretrain_224x224_1K.py' 4 | ] 5 | model = dict(pretrained='pretrain/swin_base_patch4_window7_224_22k.pth') 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] 2 | 3 | model = dict( 4 | backbone=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='pretrained/pcpvt_base.pth'), 7 | depths=[3, 4, 18, 3]), ) 8 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py'] 2 | 3 | model = dict( 4 | backbone=dict( 5 | init_cfg=dict( 6 | type='Pretrained', checkpoint='pretrained/pcpvt_large.pth'), 7 | depths=[3, 8, 27, 3])) 8 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x1024_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x1024_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x512_160k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_512x512_20k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x512_20k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_512x512_40k_voc12aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x512_40k_voc12aug.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_512x512_80k_ade20k.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_769x769_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_769x769_40k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r101_769x769_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_r50_769x769_80k_cityscapes.py' 2 | model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r50_512x1024_40k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r50_512x1024_80k_cityscapes.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | -------------------------------------------------------------------------------- /Semantic 
Segmentation/configs/upernet/upernet_r50_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/upernet/upernet_r50_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/upernet_r50.py', '../_base_/datasets/ade20k.py', 3 | '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' 4 | ] 5 | model = dict( 6 | decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/vit/upernet_deit-b16_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' 2 | 3 | model = dict( 4 | pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', 5 | backbone=dict(drop_path_rate=0.1), 6 | neck=None) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/vit/upernet_deit-b16_512x512_80k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_vit-b16_mln_512x512_80k_ade20k.py' 2 | 3 | model = dict( 4 | pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', 5 | backbone=dict(drop_path_rate=0.1), 6 | neck=None) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/vit/upernet_deit-b16_ln_mln_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' 2 | 3 | model = dict( 4 | pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', 5 | backbone=dict(drop_path_rate=0.1, final_norm=True)) 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/configs/vit/upernet_deit-b16_mln_512x512_160k_ade20k.py: -------------------------------------------------------------------------------- 1 | _base_ = './upernet_vit-b16_mln_512x512_160k_ade20k.py' 2 | 3 | model = dict( 4 | pretrained='pretrain/deit_base_patch16_224-b5f2ef4d.pth', 5 | backbone=dict(drop_path_rate=0.1), 6 | ) 7 | -------------------------------------------------------------------------------- /Semantic Segmentation/custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | 5 | __all__ = ['load_checkpoint'] 6 | -------------------------------------------------------------------------------- /Semantic Segmentation/demo/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/demo/demo.png -------------------------------------------------------------------------------- /Semantic Segmentation/docker/serve/config.properties: -------------------------------------------------------------------------------- 1 | 
inference_address=http://0.0.0.0:8080
management_address=http://0.0.0.0:8081
metrics_address=http://0.0.0.0:8082
model_store=/home/model-server/model-store
load_models=all
--------------------------------------------------------------------------------
/Semantic Segmentation/docker/serve/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

if [[ "$1" = "serve" ]]; then
    shift 1
    torchserve --start --ts-config /home/model-server/config.properties
else
    eval "$@"
fi

# prevent docker exit
tail -f /dev/null
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
.header-logo {
    background-image: url("../images/mmsegmentation.png");
    background-size: 201px 40px;
    height: 40px;
    width: 201px;
}
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/en/_static/images/mmsegmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/docs/en/_static/images/mmsegmentation.png
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/en/switch_language.md:
--------------------------------------------------------------------------------
## English

## 简体中文
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/en/tutorials/index.rst:
--------------------------------------------------------------------------------
.. toctree::
   :maxdepth: 2

   config.md
   customize_datasets.md
   data_pipeline.md
   customize_models.md
   training_tricks.md
   customize_runtime.md
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
.header-logo {
    background-image: url("../images/mmsegmentation.png");
    background-size: 201px 40px;
    height: 40px;
    width: 201px;
}
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/_static/images/mmsegmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/docs/zh_cn/_static/images/mmsegmentation.png
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/imgs/qq_group_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/docs/zh_cn/imgs/qq_group_qrcode.jpg
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/imgs/seggroup_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/docs/zh_cn/imgs/seggroup_qrcode.jpg
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/imgs/zhihu_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/docs/zh_cn/imgs/zhihu_qrcode.jpg
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
## English

## 简体中文
--------------------------------------------------------------------------------
/Semantic Segmentation/docs/zh_cn/tutorials/index.rst:
--------------------------------------------------------------------------------
.. toctree::
   :maxdepth: 2

   config.md
   customize_datasets.md
   data_pipeline.md
   customize_models.md
   training_tricks.md
   customize_runtime.md
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/core/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluation import *  # noqa: F401, F403
from .seg import *  # noqa: F401, F403
from .utils import *  # noqa: F401, F403
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/core/seg/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_pixel_sampler
from .sampler import BasePixelSampler, OHEMPixelSampler

__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/core/seg/sampler/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler

__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/core/utils/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import add_prefix

__all__ = ['add_prefix']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/models/backbones/ViTAE_Window_NoShift/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
from .models import *
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/models/backbones/custom_load/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

from .checkpoint import load_checkpoint

__all__ = ['load_checkpoint']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/models/necks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .fpn import FPN
from .ic_neck import ICNeck
from .jpu import JPU
from .mla_neck import MLANeck
from .multilevel_neck import MultiLevelNeck

__all__ = ['FPN', 'MultiLevelNeck', 'MLANeck', 'ICNeck', 'JPU']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseSegmentor
from .cascade_encoder_decoder import CascadeEncoderDecoder
from .encoder_decoder import EncoderDecoder

__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
--------------------------------------------------------------------------------
/Semantic Segmentation/mmseg/ops/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .encoding import Encoding
from .wrappers import Upsample, resize

__all__ = ['Upsample', 'resize', 'Encoding']
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements.txt:
--------------------------------------------------------------------------------
-r requirements/optional.txt
-r requirements/runtime.txt
-r requirements/tests.txt
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/docs.txt:
--------------------------------------------------------------------------------
docutils==0.16.0
myst-parser
-e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
sphinx==4.0.2
sphinx_copybutton
sphinx_markdown_tables
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/mminstall.txt:
--------------------------------------------------------------------------------
mmcv-full>=1.3.1,<=1.4.0
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/optional.txt:
--------------------------------------------------------------------------------
cityscapesscripts
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
mmcv
prettytable
torch
torchvision
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/runtime.txt:
--------------------------------------------------------------------------------
matplotlib
numpy
packaging
prettytable
--------------------------------------------------------------------------------
/Semantic Segmentation/requirements/tests.txt:
--------------------------------------------------------------------------------
codecov
flake8
interrogate
isort==4.3.21
pytest
xdoctest>=0.10.0
yapf
--------------------------------------------------------------------------------
/Semantic Segmentation/resources/3dogs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/resources/3dogs.jpg
--------------------------------------------------------------------------------
/Semantic Segmentation/resources/3dogs_mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/resources/3dogs_mask.png
--------------------------------------------------------------------------------
/Semantic Segmentation/resources/mmseg-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/resources/mmseg-logo.png
--------------------------------------------------------------------------------
/Semantic Segmentation/resources/seg_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/resources/seg_demo.gif
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_backbones/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import all_zeros, check_norm_state, is_block, is_norm

__all__ = ['is_norm', 'is_block', 'all_zeros', 'check_norm_state']
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_heads/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_losses/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_necks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_segmentors/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
--------------------------------------------------------------------------------
/Semantic Segmentation/tests/test_models/test_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/Semantic Segmentation/tests/test_models/test_utils/__init__.py
--------------------------------------------------------------------------------
/Semantic Segmentation/tools/dist_train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

CONFIG=$1
GPUS=$2
PORT=${PORT:-29500}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3}
--------------------------------------------------------------------------------
/highlycited.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViTAE-Transformer/RSP/f29818739165215d341af2ef8c20f9e2daecf128/highlycited.png
--------------------------------------------------------------------------------
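Note on the configuration files dumped above: they rely on the `_base_` inheritance mechanism used throughout this mmsegmentation-based codebase, so each child config only overrides what differs from its parents (for example the backbone depth or the pretrained checkpoint). The snippet below is a minimal sketch, not a file from the repository, showing how such a config resolves; it assumes the working directory is the `Semantic Segmentation` folder and an mmcv 1.x / mmseg environment matching `requirements/mminstall.txt`.

```python
# Hypothetical usage sketch (not part of the repo): resolve a derived config
# and build the segmentor the same way a train script typically would.
from mmcv import Config

from mmseg.models import build_segmentor

# The r101 config only sets `pretrained` and `backbone.depth`; the dataset,
# schedule, decode head, etc. are merged in from the files listed in `_base_`.
cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.depth)    # 101, overridden by the child config
print(cfg.model.decode_head.type)  # head type inherited from the pspnet_r50-d8 base

# Build the model (no weights are loaded here).
model = build_segmentor(
    cfg.model,
    train_cfg=cfg.get('train_cfg'),
    test_cfg=cfg.get('test_cfg'))
```

For distributed training, the same config path is passed to `tools/dist_train.sh <config> <num_gpus>`, which forwards it to `train.py` through `torch.distributed.launch` as shown in the script above.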