├── README.md
├── engine_finetune.py
├── engine_pretrain.py
├── figs
│ └── fastmim.png
├── main_finetune.py
├── main_pretrain.py
├── mim_cmt_finetune.py
├── mim_cmt_pretrain.py
├── mim_pvtv2_finetune.py
├── mim_pvtv2_pretrain.py
├── mim_swin_finetune.py
├── mim_swin_pretrain.py
├── mim_vit_pretrain.py
├── mmdet-v2.23
├── README.md
├── README_zh-CN.md
├── build_environment.sh
├── configs
│ ├── _base_
│ │ ├── datasets
│ │ │ ├── cityscapes_detection.py
│ │ │ ├── cityscapes_instance.py
│ │ │ ├── coco_detection.py
│ │ │ ├── coco_instance.py
│ │ │ ├── coco_instance_semantic.py
│ │ │ ├── coco_panoptic.py
│ │ │ ├── deepfashion.py
│ │ │ ├── lvis_v0.5_instance.py
│ │ │ ├── lvis_v1_instance.py
│ │ │ ├── openimages_detection.py
│ │ │ ├── voc0712.py
│ │ │ └── wider_face.py
│ │ ├── default_runtime.py
│ │ ├── models
│ │ │ ├── cascade_mask_rcnn_r50_fpn.py
│ │ │ ├── cascade_rcnn_r50_fpn.py
│ │ │ ├── fast_rcnn_r50_fpn.py
│ │ │ ├── faster_rcnn_r50_caffe_c4.py
│ │ │ ├── faster_rcnn_r50_caffe_dc5.py
│ │ │ ├── faster_rcnn_r50_fpn.py
│ │ │ ├── mask_rcnn_r50_caffe_c4.py
│ │ │ ├── mask_rcnn_r50_fpn.py
│ │ │ ├── retinanet_r50_fpn.py
│ │ │ ├── rpn_r50_caffe_c4.py
│ │ │ ├── rpn_r50_fpn.py
│ │ │ └── ssd300.py
│ │ └── schedules
│ │ │ ├── schedule_1x.py
│ │ │ ├── schedule_20e.py
│ │ │ └── schedule_2x.py
│ └── swin
│ │ ├── fastmim_mask_rcnn_upgraded_scp_swin_base_3x_coco.py
│ │ └── simmim_mask_rcnn_upgraded_scp_swin_base_3x_coco.py
├── docs
│ ├── en
│ │ ├── 1_exist_data_model.md
│ │ ├── 2_new_data_model.md
│ │ ├── 3_exist_data_new_model.md
│ │ ├── Makefile
│ │ ├── _static
│ │ │ ├── css
│ │ │ │ └── readthedocs.css
│ │ │ └── image
│ │ │ │ └── mmdet-logo.png
│ │ ├── api.rst
│ │ ├── changelog.md
│ │ ├── compatibility.md
│ │ ├── conf.py
│ │ ├── conventions.md
│ │ ├── faq.md
│ │ ├── get_started.md
│ │ ├── index.rst
│ │ ├── make.bat
│ │ ├── model_zoo.md
│ │ ├── projects.md
│ │ ├── robustness_benchmarking.md
│ │ ├── stat.py
│ │ ├── switch_language.md
│ │ ├── tutorials
│ │ │ ├── config.md
│ │ │ ├── customize_dataset.md
│ │ │ ├── customize_losses.md
│ │ │ ├── customize_models.md
│ │ │ ├── customize_runtime.md
│ │ │ ├── data_pipeline.md
│ │ │ ├── finetune.md
│ │ │ ├── how_to.md
│ │ │ ├── index.rst
│ │ │ ├── init_cfg.md
│ │ │ ├── onnx2tensorrt.md
│ │ │ ├── pytorch2onnx.md
│ │ │ └── test_results_submission.md
│ │ └── useful_tools.md
│ └── zh_cn
│ │ ├── 1_exist_data_model.md
│ │ ├── 2_new_data_model.md
│ │ ├── 3_exist_data_new_model.md
│ │ ├── Makefile
│ │ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── image
│ │ │ └── mmdet-logo.png
│ │ ├── api.rst
│ │ ├── article.md
│ │ ├── compatibility.md
│ │ ├── conf.py
│ │ ├── conventions.md
│ │ ├── faq.md
│ │ ├── get_started.md
│ │ ├── index.rst
│ │ ├── make.bat
│ │ ├── model_zoo.md
│ │ ├── projects.md
│ │ ├── robustness_benchmarking.md
│ │ ├── stat.py
│ │ ├── switch_language.md
│ │ ├── tutorials
│ │ ├── config.md
│ │ ├── customize_dataset.md
│ │ ├── customize_losses.md
│ │ ├── customize_models.md
│ │ ├── customize_runtime.md
│ │ ├── data_pipeline.md
│ │ ├── finetune.md
│ │ ├── how_to.md
│ │ ├── index.rst
│ │ ├── init_cfg.md
│ │ ├── onnx2tensorrt.md
│ │ └── pytorch2onnx.md
│ │ └── useful_tools.md
├── mmdet
│ ├── __init__.py
│ ├── apis
│ │ ├── __init__.py
│ │ ├── inference.py
│ │ ├── test.py
│ │ └── train.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── anchor
│ │ │ ├── __init__.py
│ │ │ ├── anchor_generator.py
│ │ │ ├── builder.py
│ │ │ ├── point_generator.py
│ │ │ └── utils.py
│ │ ├── bbox
│ │ │ ├── __init__.py
│ │ │ ├── assigners
│ │ │ │ ├── __init__.py
│ │ │ │ ├── approx_max_iou_assigner.py
│ │ │ │ ├── assign_result.py
│ │ │ │ ├── atss_assigner.py
│ │ │ │ ├── base_assigner.py
│ │ │ │ ├── center_region_assigner.py
│ │ │ │ ├── grid_assigner.py
│ │ │ │ ├── hungarian_assigner.py
│ │ │ │ ├── mask_hungarian_assigner.py
│ │ │ │ ├── max_iou_assigner.py
│ │ │ │ ├── point_assigner.py
│ │ │ │ ├── region_assigner.py
│ │ │ │ ├── sim_ota_assigner.py
│ │ │ │ ├── task_aligned_assigner.py
│ │ │ │ └── uniform_assigner.py
│ │ │ ├── builder.py
│ │ │ ├── coder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_bbox_coder.py
│ │ │ │ ├── bucketing_bbox_coder.py
│ │ │ │ ├── delta_xywh_bbox_coder.py
│ │ │ │ ├── distance_point_bbox_coder.py
│ │ │ │ ├── legacy_delta_xywh_bbox_coder.py
│ │ │ │ ├── pseudo_bbox_coder.py
│ │ │ │ ├── tblr_bbox_coder.py
│ │ │ │ └── yolo_bbox_coder.py
│ │ │ ├── demodata.py
│ │ │ ├── iou_calculators
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builder.py
│ │ │ │ └── iou2d_calculator.py
│ │ │ ├── match_costs
│ │ │ │ ├── __init__.py
│ │ │ │ ├── builder.py
│ │ │ │ └── match_cost.py
│ │ │ ├── samplers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_sampler.py
│ │ │ │ ├── combined_sampler.py
│ │ │ │ ├── instance_balanced_pos_sampler.py
│ │ │ │ ├── iou_balanced_neg_sampler.py
│ │ │ │ ├── mask_pseudo_sampler.py
│ │ │ │ ├── mask_sampling_result.py
│ │ │ │ ├── ohem_sampler.py
│ │ │ │ ├── pseudo_sampler.py
│ │ │ │ ├── random_sampler.py
│ │ │ │ ├── sampling_result.py
│ │ │ │ └── score_hlr_sampler.py
│ │ │ └── transforms.py
│ │ ├── data_structures
│ │ │ ├── __init__.py
│ │ │ ├── general_data.py
│ │ │ └── instance_data.py
│ │ ├── evaluation
│ │ │ ├── __init__.py
│ │ │ ├── bbox_overlaps.py
│ │ │ ├── class_names.py
│ │ │ ├── eval_hooks.py
│ │ │ ├── mean_ap.py
│ │ │ ├── panoptic_utils.py
│ │ │ └── recall.py
│ │ ├── export
│ │ │ ├── __init__.py
│ │ │ ├── model_wrappers.py
│ │ │ ├── onnx_helper.py
│ │ │ └── pytorch2onnx.py
│ │ ├── hook
│ │ │ ├── __init__.py
│ │ │ ├── checkloss_hook.py
│ │ │ ├── ema.py
│ │ │ ├── set_epoch_info_hook.py
│ │ │ ├── sync_norm_hook.py
│ │ │ ├── sync_random_size_hook.py
│ │ │ ├── yolox_lrupdater_hook.py
│ │ │ └── yolox_mode_switch_hook.py
│ │ ├── mask
│ │ │ ├── __init__.py
│ │ │ ├── mask_target.py
│ │ │ ├── structures.py
│ │ │ └── utils.py
│ │ ├── post_processing
│ │ │ ├── __init__.py
│ │ │ ├── bbox_nms.py
│ │ │ ├── matrix_nms.py
│ │ │ └── merge_augs.py
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── dist_utils.py
│ │ │ └── misc.py
│ │ └── visualization
│ │ │ ├── __init__.py
│ │ │ ├── image.py
│ │ │ └── palette.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── api_wrappers
│ │ │ ├── __init__.py
│ │ │ ├── coco_api.py
│ │ │ └── panoptic_evaluation.py
│ │ ├── builder.py
│ │ ├── cityscapes.py
│ │ ├── coco.py
│ │ ├── coco_panoptic.py
│ │ ├── custom.py
│ │ ├── dataset_wrappers.py
│ │ ├── deepfashion.py
│ │ ├── lvis.py
│ │ ├── openimages.py
│ │ ├── pipelines
│ │ │ ├── __init__.py
│ │ │ ├── auto_augment.py
│ │ │ ├── compose.py
│ │ │ ├── formating.py
│ │ │ ├── formatting.py
│ │ │ ├── instaboost.py
│ │ │ ├── loading.py
│ │ │ ├── test_time_aug.py
│ │ │ └── transforms.py
│ │ ├── samplers
│ │ │ ├── __init__.py
│ │ │ ├── distributed_sampler.py
│ │ │ ├── group_sampler.py
│ │ │ └── infinite_sampler.py
│ │ ├── utils.py
│ │ ├── voc.py
│ │ ├── wider_face.py
│ │ └── xml_style.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ ├── __init__.py
│ │ │ ├── swin.py
│ │ │ └── vit.py
│ │ ├── builder.py
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── api_wrappers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── coco_api.py
│ │ │ │ └── panoptic_evaluation.py
│ │ │ ├── builder.py
│ │ │ ├── cityscapes.py
│ │ │ ├── coco.py
│ │ │ ├── coco_panoptic.py
│ │ │ ├── custom.py
│ │ │ ├── dataset_wrappers.py
│ │ │ ├── deepfashion.py
│ │ │ ├── lvis.py
│ │ │ ├── openimages.py
│ │ │ ├── pipelines
│ │ │ │ ├── __init__.py
│ │ │ │ ├── auto_augment.py
│ │ │ │ ├── compose.py
│ │ │ │ ├── formating.py
│ │ │ │ ├── formatting.py
│ │ │ │ ├── instaboost.py
│ │ │ │ ├── loading.py
│ │ │ │ ├── test_time_aug.py
│ │ │ │ └── transforms.py
│ │ │ ├── samplers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── distributed_sampler.py
│ │ │ │ ├── group_sampler.py
│ │ │ │ └── infinite_sampler.py
│ │ │ ├── utils.py
│ │ │ ├── voc.py
│ │ │ ├── wider_face.py
│ │ │ └── xml_style.py
│ │ ├── dense_heads
│ │ │ ├── __init__.py
│ │ │ ├── anchor_free_head.py
│ │ │ ├── anchor_head.py
│ │ │ ├── atss_head.py
│ │ │ ├── autoassign_head.py
│ │ │ ├── base_dense_head.py
│ │ │ ├── base_mask_head.py
│ │ │ ├── cascade_rpn_head.py
│ │ │ ├── centernet_head.py
│ │ │ ├── centripetal_head.py
│ │ │ ├── corner_head.py
│ │ │ ├── deformable_detr_head.py
│ │ │ ├── dense_test_mixins.py
│ │ │ ├── detr_head.py
│ │ │ ├── embedding_rpn_head.py
│ │ │ ├── fcos_head.py
│ │ │ ├── fovea_head.py
│ │ │ ├── free_anchor_retina_head.py
│ │ │ ├── fsaf_head.py
│ │ │ ├── ga_retina_head.py
│ │ │ ├── ga_rpn_head.py
│ │ │ ├── gfl_head.py
│ │ │ ├── guided_anchor_head.py
│ │ │ ├── lad_head.py
│ │ │ ├── ld_head.py
│ │ │ ├── mask2former_head.py
│ │ │ ├── maskformer_head.py
│ │ │ ├── nasfcos_head.py
│ │ │ ├── paa_head.py
│ │ │ ├── pisa_retinanet_head.py
│ │ │ ├── pisa_ssd_head.py
│ │ │ ├── reppoints_head.py
│ │ │ ├── retina_head.py
│ │ │ ├── retina_sepbn_head.py
│ │ │ ├── rpn_head.py
│ │ │ ├── sabl_retina_head.py
│ │ │ ├── solo_head.py
│ │ │ ├── ssd_head.py
│ │ │ ├── tood_head.py
│ │ │ ├── vfnet_head.py
│ │ │ ├── yolact_head.py
│ │ │ ├── yolo_head.py
│ │ │ ├── yolof_head.py
│ │ │ └── yolox_head.py
│ │ ├── detectors
│ │ │ ├── __init__.py
│ │ │ ├── atss.py
│ │ │ ├── autoassign.py
│ │ │ ├── base.py
│ │ │ ├── cascade_rcnn.py
│ │ │ ├── centernet.py
│ │ │ ├── cornernet.py
│ │ │ ├── deformable_detr.py
│ │ │ ├── detr.py
│ │ │ ├── fast_rcnn.py
│ │ │ ├── faster_rcnn.py
│ │ │ ├── fcos.py
│ │ │ ├── fovea.py
│ │ │ ├── fsaf.py
│ │ │ ├── gfl.py
│ │ │ ├── grid_rcnn.py
│ │ │ ├── htc.py
│ │ │ ├── kd_one_stage.py
│ │ │ ├── lad.py
│ │ │ ├── mask2former.py
│ │ │ ├── mask_rcnn.py
│ │ │ ├── mask_scoring_rcnn.py
│ │ │ ├── maskformer.py
│ │ │ ├── nasfcos.py
│ │ │ ├── paa.py
│ │ │ ├── panoptic_fpn.py
│ │ │ ├── panoptic_two_stage_segmentor.py
│ │ │ ├── point_rend.py
│ │ │ ├── queryinst.py
│ │ │ ├── reppoints_detector.py
│ │ │ ├── retinanet.py
│ │ │ ├── rpn.py
│ │ │ ├── scnet.py
│ │ │ ├── single_stage.py
│ │ │ ├── single_stage_instance_seg.py
│ │ │ ├── solo.py
│ │ │ ├── sparse_rcnn.py
│ │ │ ├── tood.py
│ │ │ ├── trident_faster_rcnn.py
│ │ │ ├── two_stage.py
│ │ │ ├── vfnet.py
│ │ │ ├── yolact.py
│ │ │ ├── yolo.py
│ │ │ ├── yolof.py
│ │ │ └── yolox.py
│ │ ├── losses
│ │ │ ├── __init__.py
│ │ │ ├── accuracy.py
│ │ │ ├── ae_loss.py
│ │ │ ├── balanced_l1_loss.py
│ │ │ ├── cross_entropy_loss.py
│ │ │ ├── dice_loss.py
│ │ │ ├── focal_loss.py
│ │ │ ├── gaussian_focal_loss.py
│ │ │ ├── gfocal_loss.py
│ │ │ ├── ghm_loss.py
│ │ │ ├── iou_loss.py
│ │ │ ├── kd_loss.py
│ │ │ ├── mse_loss.py
│ │ │ ├── pisa_loss.py
│ │ │ ├── seesaw_loss.py
│ │ │ ├── smooth_l1_loss.py
│ │ │ ├── utils.py
│ │ │ └── varifocal_loss.py
│ │ ├── necks
│ │ │ ├── __init__.py
│ │ │ ├── bfp.py
│ │ │ ├── channel_mapper.py
│ │ │ ├── ct_resnet_neck.py
│ │ │ ├── dilated_encoder.py
│ │ │ ├── dyhead.py
│ │ │ ├── fpg.py
│ │ │ ├── fpn.py
│ │ │ ├── fpn_carafe.py
│ │ │ ├── hrfpn.py
│ │ │ ├── nas_fpn.py
│ │ │ ├── nasfcos_fpn.py
│ │ │ ├── pafpn.py
│ │ │ ├── rfp.py
│ │ │ ├── ssd_neck.py
│ │ │ ├── yolo_neck.py
│ │ │ └── yolox_pafpn.py
│ │ ├── plugins
│ │ │ ├── __init__.py
│ │ │ ├── dropblock.py
│ │ │ ├── msdeformattn_pixel_decoder.py
│ │ │ └── pixel_decoder.py
│ │ ├── roi_heads
│ │ │ ├── __init__.py
│ │ │ ├── base_roi_head.py
│ │ │ ├── bbox_heads
│ │ │ │ ├── __init__.py
│ │ │ │ ├── bbox_head.py
│ │ │ │ ├── convfc_bbox_head.py
│ │ │ │ ├── dii_head.py
│ │ │ │ ├── double_bbox_head.py
│ │ │ │ ├── sabl_head.py
│ │ │ │ └── scnet_bbox_head.py
│ │ │ ├── cascade_roi_head.py
│ │ │ ├── double_roi_head.py
│ │ │ ├── dynamic_roi_head.py
│ │ │ ├── grid_roi_head.py
│ │ │ ├── htc_roi_head.py
│ │ │ ├── mask_heads
│ │ │ │ ├── __init__.py
│ │ │ │ ├── coarse_mask_head.py
│ │ │ │ ├── dynamic_mask_head.py
│ │ │ │ ├── fcn_mask_head.py
│ │ │ │ ├── feature_relay_head.py
│ │ │ │ ├── fused_semantic_head.py
│ │ │ │ ├── global_context_head.py
│ │ │ │ ├── grid_head.py
│ │ │ │ ├── htc_mask_head.py
│ │ │ │ ├── mask_point_head.py
│ │ │ │ ├── maskiou_head.py
│ │ │ │ ├── scnet_mask_head.py
│ │ │ │ └── scnet_semantic_head.py
│ │ │ ├── mask_scoring_roi_head.py
│ │ │ ├── pisa_roi_head.py
│ │ │ ├── point_rend_roi_head.py
│ │ │ ├── roi_extractors
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_roi_extractor.py
│ │ │ │ ├── generic_roi_extractor.py
│ │ │ │ └── single_level_roi_extractor.py
│ │ │ ├── scnet_roi_head.py
│ │ │ ├── shared_heads
│ │ │ │ ├── __init__.py
│ │ │ │ └── res_layer.py
│ │ │ ├── sparse_roi_head.py
│ │ │ ├── standard_roi_head.py
│ │ │ ├── test_mixins.py
│ │ │ └── trident_roi_head.py
│ │ ├── seg_heads
│ │ │ ├── __init__.py
│ │ │ ├── base_semantic_head.py
│ │ │ ├── panoptic_fpn_head.py
│ │ │ └── panoptic_fusion_heads
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_panoptic_fusion_head.py
│ │ │ │ ├── heuristic_fusion_head.py
│ │ │ │ └── maskformer_fusion_head.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── brick_wrappers.py
│ │ │ ├── builder.py
│ │ │ ├── ckpt_convert.py
│ │ │ ├── conv_upsample.py
│ │ │ ├── csp_layer.py
│ │ │ ├── gaussian_target.py
│ │ │ ├── inverted_residual.py
│ │ │ ├── make_divisible.py
│ │ │ ├── misc.py
│ │ │ ├── normed_predictor.py
│ │ │ ├── panoptic_gt_processing.py
│ │ │ ├── point_sample.py
│ │ │ ├── positional_encoding.py
│ │ │ ├── res_layer.py
│ │ │ ├── se_layer.py
│ │ │ └── transformer.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── collect_env.py
│ │ ├── contextmanagers.py
│ │ ├── logger.py
│ │ ├── misc.py
│ │ ├── profiling.py
│ │ ├── setup_env.py
│ │ ├── util_mixins.py
│ │ └── util_random.py
│ └── version.py
├── pytest.ini
├── requirements.txt
├── requirements
│ ├── albu.txt
│ ├── build.txt
│ ├── docs.txt
│ ├── mminstall.txt
│ ├── optional.txt
│ ├── readthedocs.txt
│ ├── runtime.txt
│ └── tests.txt
├── resources
│ ├── coco_test_12510.jpg
│ ├── corruptions_sev_3.png
│ ├── data_pipeline.png
│ ├── loss_curve.png
│ ├── mmdet-logo.png
│ ├── qq_group_qrcode.jpg
│ └── zhihu_qrcode.jpg
├── setup.cfg
├── setup.py
├── tests
│ ├── data
│ │ ├── VOCdevkit
│ │ │ ├── VOC2007
│ │ │ │ ├── Annotations
│ │ │ │ │ └── 000001.xml
│ │ │ │ ├── ImageSets
│ │ │ │ │ └── Main
│ │ │ │ │ │ ├── test.txt
│ │ │ │ │ │ └── trainval.txt
│ │ │ │ └── JPEGImages
│ │ │ │ │ └── 000001.jpg
│ │ │ └── VOC2012
│ │ │ │ ├── Annotations
│ │ │ │ └── 000001.xml
│ │ │ │ ├── ImageSets
│ │ │ │ └── Main
│ │ │ │ │ ├── test.txt
│ │ │ │ │ └── trainval.txt
│ │ │ │ └── JPEGImages
│ │ │ │ └── 000001.jpg
│ │ ├── coco_sample.json
│ │ ├── color.jpg
│ │ ├── configs_mmtrack
│ │ │ ├── faster_rcnn_r50_dc5.py
│ │ │ ├── faster_rcnn_r50_fpn.py
│ │ │ ├── mot_challenge.py
│ │ │ ├── selsa_faster_rcnn_r101_dc5_1x.py
│ │ │ └── tracktor_faster-rcnn_r50_fpn_4e.py
│ │ ├── custom_dataset
│ │ │ ├── images
│ │ │ │ ├── 000001.jpg
│ │ │ │ └── 000001.xml
│ │ │ ├── test.txt
│ │ │ └── trainval.txt
│ │ └── gray.jpg
│ ├── test_data
│ │ ├── test_datasets
│ │ │ ├── test_coco_dataset.py
│ │ │ ├── test_common.py
│ │ │ ├── test_custom_dataset.py
│ │ │ ├── test_dataset_wrapper.py
│ │ │ ├── test_openimages_dataset.py
│ │ │ ├── test_panoptic_dataset.py
│ │ │ └── test_xml_dataset.py
│ │ ├── test_pipelines
│ │ │ ├── test_formatting.py
│ │ │ ├── test_loading.py
│ │ │ ├── test_sampler.py
│ │ │ └── test_transform
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_img_augment.py
│ │ │ │ ├── test_models_aug_test.py
│ │ │ │ ├── test_rotate.py
│ │ │ │ ├── test_shear.py
│ │ │ │ ├── test_transform.py
│ │ │ │ ├── test_translate.py
│ │ │ │ └── utils.py
│ │ └── test_utils.py
│ ├── test_downstream
│ │ └── test_mmtrack.py
│ ├── test_metrics
│ │ ├── test_box_overlap.py
│ │ ├── test_losses.py
│ │ ├── test_mean_ap.py
│ │ └── test_recall.py
│ ├── test_models
│ │ ├── test_backbones
│ │ │ ├── __init__.py
│ │ │ ├── test_csp_darknet.py
│ │ │ ├── test_detectors_resnet.py
│ │ │ ├── test_efficientnet.py
│ │ │ ├── test_hourglass.py
│ │ │ ├── test_hrnet.py
│ │ │ ├── test_mobilenet_v2.py
│ │ │ ├── test_pvt.py
│ │ │ ├── test_regnet.py
│ │ │ ├── test_renext.py
│ │ │ ├── test_res2net.py
│ │ │ ├── test_resnest.py
│ │ │ ├── test_resnet.py
│ │ │ ├── test_swin.py
│ │ │ ├── test_trident_resnet.py
│ │ │ └── utils.py
│ │ ├── test_dense_heads
│ │ │ ├── test_anchor_head.py
│ │ │ ├── test_atss_head.py
│ │ │ ├── test_autoassign_head.py
│ │ │ ├── test_centernet_head.py
│ │ │ ├── test_corner_head.py
│ │ │ ├── test_dense_heads_attr.py
│ │ │ ├── test_detr_head.py
│ │ │ ├── test_fcos_head.py
│ │ │ ├── test_fsaf_head.py
│ │ │ ├── test_ga_anchor_head.py
│ │ │ ├── test_gfl_head.py
│ │ │ ├── test_lad_head.py
│ │ │ ├── test_ld_head.py
│ │ │ ├── test_mask2former_head.py
│ │ │ ├── test_maskformer_head.py
│ │ │ ├── test_paa_head.py
│ │ │ ├── test_pisa_head.py
│ │ │ ├── test_sabl_retina_head.py
│ │ │ ├── test_solo_head.py
│ │ │ ├── test_tood_head.py
│ │ │ ├── test_vfnet_head.py
│ │ │ ├── test_yolact_head.py
│ │ │ ├── test_yolof_head.py
│ │ │ └── test_yolox_head.py
│ │ ├── test_forward.py
│ │ ├── test_loss.py
│ │ ├── test_loss_compatibility.py
│ │ ├── test_necks.py
│ │ ├── test_plugins.py
│ │ ├── test_roi_heads
│ │ │ ├── __init__.py
│ │ │ ├── test_bbox_head.py
│ │ │ ├── test_mask_head.py
│ │ │ ├── test_roi_extractor.py
│ │ │ ├── test_sabl_bbox_head.py
│ │ │ └── utils.py
│ │ ├── test_seg_heads
│ │ │ └── test_maskformer_fusion_head.py
│ │ └── test_utils
│ │ │ ├── test_brick_wrappers.py
│ │ │ ├── test_conv_upsample.py
│ │ │ ├── test_inverted_residual.py
│ │ │ ├── test_model_misc.py
│ │ │ ├── test_position_encoding.py
│ │ │ ├── test_se_layer.py
│ │ │ └── test_transformer.py
│ ├── test_onnx
│ │ ├── __init__.py
│ │ ├── data
│ │ │ ├── fsaf_head_get_bboxes.pkl
│ │ │ ├── retina_head_get_bboxes.pkl
│ │ │ ├── ssd_head_get_bboxes.pkl
│ │ │ ├── yolov3_head_get_bboxes.pkl
│ │ │ └── yolov3_neck.pkl
│ │ ├── test_head.py
│ │ ├── test_neck.py
│ │ └── utils.py
│ ├── test_runtime
│ │ ├── async_benchmark.py
│ │ ├── test_async.py
│ │ ├── test_config.py
│ │ ├── test_eval_hook.py
│ │ └── test_fp16.py
│ └── test_utils
│ │ ├── test_anchor.py
│ │ ├── test_assigner.py
│ │ ├── test_coder.py
│ │ ├── test_general_data.py
│ │ ├── test_hook.py
│ │ ├── test_logger.py
│ │ ├── test_masks.py
│ │ ├── test_misc.py
│ │ ├── test_nms.py
│ │ ├── test_setup_env.py
│ │ ├── test_version.py
│ │ └── test_visualization.py
└── tools
│ ├── analysis_tools
│ ├── analyze_logs.py
│ ├── analyze_results.py
│ ├── benchmark.py
│ ├── coco_error_analysis.py
│ ├── confusion_matrix.py
│ ├── eval_metric.py
│ ├── get_flops.py
│ ├── optimize_anchors.py
│ ├── robustness_eval.py
│ └── test_robustness.py
│ ├── dataset_converters
│ ├── cityscapes.py
│ ├── images2coco.py
│ └── pascal_voc.py
│ ├── deployment
│ ├── mmdet2torchserve.py
│ ├── mmdet_handler.py
│ ├── onnx2tensorrt.py
│ ├── pytorch2onnx.py
│ ├── test.py
│ └── test_torchserver.py
│ ├── dist_test.sh
│ ├── dist_train.sh
│ ├── misc
│ ├── browse_dataset.py
│ ├── download_dataset.py
│ ├── gen_coco_panoptic_test_info.py
│ ├── get_image_metas.py
│ └── print_config.py
│ ├── model_converters
│ ├── detectron2pytorch.py
│ ├── publish_model.py
│ ├── regnet2mmdet.py
│ ├── selfsup2mmdet.py
│ ├── upgrade_model_version.py
│ └── upgrade_ssd_version.py
│ ├── slurm_test.sh
│ ├── slurm_train.sh
│ ├── test.py
│ └── train.py
├── mmseg-v0.28
├── CITATION.cff
├── LICENSE
├── LICENSES.md
├── MANIFEST.in
├── README.md
├── README_zh-CN.md
├── configs
│ ├── _base_
│ │ ├── datasets
│ │ │ ├── ade20k.py
│ │ │ ├── ade20k_640x640.py
│ │ │ ├── chase_db1.py
│ │ │ ├── cityscapes.py
│ │ │ ├── cityscapes_1024x1024.py
│ │ │ ├── cityscapes_768x768.py
│ │ │ ├── cityscapes_769x769.py
│ │ │ ├── cityscapes_832x832.py
│ │ │ ├── coco-stuff10k.py
│ │ │ ├── coco-stuff164k.py
│ │ │ ├── drive.py
│ │ │ ├── hrf.py
│ │ │ ├── isaid.py
│ │ │ ├── loveda.py
│ │ │ ├── pascal_context.py
│ │ │ ├── pascal_context_59.py
│ │ │ ├── pascal_voc12.py
│ │ │ ├── pascal_voc12_aug.py
│ │ │ ├── potsdam.py
│ │ │ ├── stare.py
│ │ │ └── vaihingen.py
│ │ ├── default_runtime.py
│ │ ├── models
│ │ │ ├── ann_r50-d8.py
│ │ │ ├── apcnet_r50-d8.py
│ │ │ ├── bisenetv1_r18-d32.py
│ │ │ ├── bisenetv2.py
│ │ │ ├── ccnet_r50-d8.py
│ │ │ ├── cgnet.py
│ │ │ ├── danet_r50-d8.py
│ │ │ ├── deeplabv3_r50-d8.py
│ │ │ ├── deeplabv3_unet_s5-d16.py
│ │ │ ├── deeplabv3plus_r50-d8.py
│ │ │ ├── dmnet_r50-d8.py
│ │ │ ├── dnl_r50-d8.py
│ │ │ ├── dpt_vit-b16.py
│ │ │ ├── emanet_r50-d8.py
│ │ │ ├── encnet_r50-d8.py
│ │ │ ├── erfnet_fcn.py
│ │ │ ├── fast_scnn.py
│ │ │ ├── fastfcn_r50-d32_jpu_psp.py
│ │ │ ├── fcn_hr18.py
│ │ │ ├── fcn_r50-d8.py
│ │ │ ├── fcn_unet_s5-d16.py
│ │ │ ├── fpn_r50.py
│ │ │ ├── gcnet_r50-d8.py
│ │ │ ├── icnet_r50-d8.py
│ │ │ ├── isanet_r50-d8.py
│ │ │ ├── lraspp_m-v3-d8.py
│ │ │ ├── nonlocal_r50-d8.py
│ │ │ ├── ocrnet_hr18.py
│ │ │ ├── ocrnet_r50-d8.py
│ │ │ ├── pointrend_r50.py
│ │ │ ├── psanet_r50-d8.py
│ │ │ ├── pspnet_r50-d8.py
│ │ │ ├── pspnet_unet_s5-d16.py
│ │ │ ├── segformer_mit-b0.py
│ │ │ ├── segmenter_vit-b16_mask.py
│ │ │ ├── setr_mla.py
│ │ │ ├── setr_naive.py
│ │ │ ├── setr_pup.py
│ │ │ ├── stdc.py
│ │ │ ├── twins_pcpvt-s_fpn.py
│ │ │ ├── twins_pcpvt-s_upernet.py
│ │ │ ├── upernet_beit.py
│ │ │ ├── upernet_convnext.py
│ │ │ ├── upernet_mae.py
│ │ │ ├── upernet_r50.py
│ │ │ ├── upernet_swin.py
│ │ │ └── upernet_vit-b16_ln_mln.py
│ │ └── schedules
│ │ │ ├── schedule_160k.py
│ │ │ ├── schedule_20k.py
│ │ │ ├── schedule_320k.py
│ │ │ ├── schedule_40k.py
│ │ │ └── schedule_80k.py
│ └── vit
│ │ └── fastmim_vit_base_cfg.py
├── demo
│ ├── MMSegmentation_Tutorial.ipynb
│ ├── demo.png
│ ├── image_demo.py
│ ├── inference_demo.ipynb
│ └── video_demo.py
├── docker
│ ├── Dockerfile
│ └── serve
│ │ ├── Dockerfile
│ │ ├── config.properties
│ │ └── entrypoint.sh
├── docs
│ ├── en
│ │ ├── Makefile
│ │ ├── _static
│ │ │ ├── css
│ │ │ │ └── readthedocs.css
│ │ │ └── images
│ │ │ │ └── mmsegmentation.png
│ │ ├── api.rst
│ │ ├── changelog.md
│ │ ├── conf.py
│ │ ├── dataset_prepare.md
│ │ ├── faq.md
│ │ ├── get_started.md
│ │ ├── index.rst
│ │ ├── inference.md
│ │ ├── make.bat
│ │ ├── model_zoo.md
│ │ ├── stat.py
│ │ ├── switch_language.md
│ │ ├── train.md
│ │ ├── tutorials
│ │ │ ├── config.md
│ │ │ ├── customize_datasets.md
│ │ │ ├── customize_models.md
│ │ │ ├── customize_runtime.md
│ │ │ ├── data_pipeline.md
│ │ │ ├── index.rst
│ │ │ └── training_tricks.md
│ │ └── useful_tools.md
│ └── zh_cn
│ │ ├── Makefile
│ │ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── images
│ │ │ └── mmsegmentation.png
│ │ ├── api.rst
│ │ ├── conf.py
│ │ ├── dataset_prepare.md
│ │ ├── faq.md
│ │ ├── get_started.md
│ │ ├── imgs
│ │ ├── qq_group_qrcode.jpg
│ │ └── zhihu_qrcode.jpg
│ │ ├── index.rst
│ │ ├── inference.md
│ │ ├── make.bat
│ │ ├── model_zoo.md
│ │ ├── stat.py
│ │ ├── switch_language.md
│ │ ├── train.md
│ │ ├── tutorials
│ │ ├── config.md
│ │ ├── customize_datasets.md
│ │ ├── customize_models.md
│ │ ├── customize_runtime.md
│ │ ├── data_pipeline.md
│ │ ├── index.rst
│ │ └── training_tricks.md
│ │ └── useful_tools.md
├── mmseg
│ ├── __init__.py
│ ├── apis
│ │ ├── __init__.py
│ │ ├── inference.py
│ │ ├── test.py
│ │ └── train.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── builder.py
│ │ ├── evaluation
│ │ │ ├── __init__.py
│ │ │ ├── class_names.py
│ │ │ ├── eval_hooks.py
│ │ │ └── metrics.py
│ │ ├── hook
│ │ │ ├── __init__.py
│ │ │ └── wandblogger_hook.py
│ │ ├── optimizers
│ │ │ ├── __init__.py
│ │ │ └── layer_decay_optimizer_constructor.py
│ │ ├── seg
│ │ │ ├── __init__.py
│ │ │ ├── builder.py
│ │ │ └── sampler
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_pixel_sampler.py
│ │ │ │ └── ohem_pixel_sampler.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── dist_util.py
│ │ │ └── misc.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── ade.py
│ │ ├── builder.py
│ │ ├── chase_db1.py
│ │ ├── cityscapes.py
│ │ ├── coco_stuff.py
│ │ ├── custom.py
│ │ ├── dark_zurich.py
│ │ ├── dataset_wrappers.py
│ │ ├── drive.py
│ │ ├── hrf.py
│ │ ├── isaid.py
│ │ ├── isprs.py
│ │ ├── loveda.py
│ │ ├── night_driving.py
│ │ ├── pascal_context.py
│ │ ├── pipelines
│ │ │ ├── __init__.py
│ │ │ ├── compose.py
│ │ │ ├── formating.py
│ │ │ ├── formatting.py
│ │ │ ├── loading.py
│ │ │ ├── test_time_aug.py
│ │ │ └── transforms.py
│ │ ├── potsdam.py
│ │ ├── samplers
│ │ │ ├── __init__.py
│ │ │ └── distributed_sampler.py
│ │ ├── stare.py
│ │ └── voc.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ ├── __init__.py
│ │ │ ├── beit.py
│ │ │ ├── bisenetv1.py
│ │ │ ├── bisenetv2.py
│ │ │ ├── cgnet.py
│ │ │ ├── erfnet.py
│ │ │ ├── fast_scnn.py
│ │ │ ├── hrnet.py
│ │ │ ├── icnet.py
│ │ │ ├── mae.py
│ │ │ ├── mit.py
│ │ │ ├── mobilenet_v2.py
│ │ │ ├── mobilenet_v3.py
│ │ │ ├── resnest.py
│ │ │ ├── resnet.py
│ │ │ ├── resnext.py
│ │ │ ├── stdc.py
│ │ │ ├── swin.py
│ │ │ ├── timm_backbone.py
│ │ │ ├── twins.py
│ │ │ ├── unet.py
│ │ │ ├── vit.py
│ │ │ └── vit_beit.py
│ │ ├── builder.py
│ │ ├── decode_heads
│ │ │ ├── __init__.py
│ │ │ ├── ann_head.py
│ │ │ ├── apc_head.py
│ │ │ ├── aspp_head.py
│ │ │ ├── cascade_decode_head.py
│ │ │ ├── cc_head.py
│ │ │ ├── da_head.py
│ │ │ ├── decode_head.py
│ │ │ ├── dm_head.py
│ │ │ ├── dnl_head.py
│ │ │ ├── dpt_head.py
│ │ │ ├── ema_head.py
│ │ │ ├── enc_head.py
│ │ │ ├── fcn_head.py
│ │ │ ├── fpn_head.py
│ │ │ ├── gc_head.py
│ │ │ ├── isa_head.py
│ │ │ ├── knet_head.py
│ │ │ ├── lraspp_head.py
│ │ │ ├── nl_head.py
│ │ │ ├── ocr_head.py
│ │ │ ├── point_head.py
│ │ │ ├── psa_head.py
│ │ │ ├── psp_head.py
│ │ │ ├── segformer_head.py
│ │ │ ├── segmenter_mask_head.py
│ │ │ ├── sep_aspp_head.py
│ │ │ ├── sep_fcn_head.py
│ │ │ ├── setr_mla_head.py
│ │ │ ├── setr_up_head.py
│ │ │ ├── stdc_head.py
│ │ │ └── uper_head.py
│ │ ├── losses
│ │ │ ├── __init__.py
│ │ │ ├── accuracy.py
│ │ │ ├── cross_entropy_loss.py
│ │ │ ├── dice_loss.py
│ │ │ ├── focal_loss.py
│ │ │ ├── lovasz_loss.py
│ │ │ ├── tversky_loss.py
│ │ │ └── utils.py
│ │ ├── necks
│ │ │ ├── __init__.py
│ │ │ ├── featurepyramid.py
│ │ │ ├── fpn.py
│ │ │ ├── ic_neck.py
│ │ │ ├── jpu.py
│ │ │ ├── mla_neck.py
│ │ │ └── multilevel_neck.py
│ │ ├── segmentors
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── cascade_encoder_decoder.py
│ │ │ └── encoder_decoder.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── embed.py
│ │ │ ├── inverted_residual.py
│ │ │ ├── make_divisible.py
│ │ │ ├── res_layer.py
│ │ │ ├── se_layer.py
│ │ │ ├── self_attention_block.py
│ │ │ ├── shape_convert.py
│ │ │ └── up_conv_block.py
│ ├── ops
│ │ ├── __init__.py
│ │ ├── encoding.py
│ │ └── wrappers.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── collect_env.py
│ │ ├── logger.py
│ │ ├── misc.py
│ │ ├── set_env.py
│ │ └── util_distribution.py
│ └── version.py
├── model-index.yml
├── pytest.ini
├── requirements.txt
├── requirements
│ ├── docs.txt
│ ├── mminstall.txt
│ ├── optional.txt
│ ├── readthedocs.txt
│ ├── runtime.txt
│ └── tests.txt
├── resources
│ ├── 3dogs.jpg
│ ├── 3dogs_mask.png
│ ├── mmseg-logo.png
│ └── seg_demo.gif
├── setup.cfg
├── setup.py
├── tests
│ ├── __init__.py
│ ├── data
│ │ ├── color.jpg
│ │ ├── gray.jpg
│ │ ├── pseudo_cityscapes_dataset
│ │ │ ├── gtFine
│ │ │ │ ├── frankfurt_000000_000294_gtFine_instanceIds.png
│ │ │ │ ├── frankfurt_000000_000294_gtFine_labelIds.png
│ │ │ │ └── frankfurt_000000_000294_gtFine_labelTrainIds.png
│ │ │ └── leftImg8bit
│ │ │ │ └── frankfurt_000000_000294_leftImg8bit.png
│ │ ├── pseudo_dataset
│ │ │ ├── gts
│ │ │ │ ├── 00000_gt.png
│ │ │ │ ├── 00001_gt.png
│ │ │ │ ├── 00002_gt.png
│ │ │ │ ├── 00003_gt.png
│ │ │ │ └── 00004_gt.png
│ │ │ ├── imgs
│ │ │ │ ├── 00000_img.jpg
│ │ │ │ ├── 00001_img.jpg
│ │ │ │ ├── 00002_img.jpg
│ │ │ │ ├── 00003_img.jpg
│ │ │ │ └── 00004_img.jpg
│ │ │ └── splits
│ │ │ │ ├── train.txt
│ │ │ │ └── val.txt
│ │ ├── pseudo_isaid_dataset
│ │ │ ├── ann_dir
│ │ │ │ ├── P0000_0_896_1024_1920_instance_color_RGB.png
│ │ │ │ └── P0000_0_896_1536_2432_instance_color_RGB.png
│ │ │ ├── img_dir
│ │ │ │ ├── P0000_0_896_1024_1920.png
│ │ │ │ └── P0000_0_896_1536_2432.png
│ │ │ └── splits
│ │ │ │ ├── train.txt
│ │ │ │ └── val.txt
│ │ ├── pseudo_loveda_dataset
│ │ │ ├── ann_dir
│ │ │ │ ├── 0.png
│ │ │ │ ├── 1.png
│ │ │ │ └── 2.png
│ │ │ └── img_dir
│ │ │ │ ├── 0.png
│ │ │ │ ├── 1.png
│ │ │ │ └── 2.png
│ │ ├── pseudo_potsdam_dataset
│ │ │ ├── ann_dir
│ │ │ │ └── 2_10_0_0_512_512.png
│ │ │ └── img_dir
│ │ │ │ └── 2_10_0_0_512_512.png
│ │ ├── pseudo_vaihingen_dataset
│ │ │ ├── ann_dir
│ │ │ │ └── area1_0_0_512_512.png
│ │ │ └── img_dir
│ │ │ │ └── area1_0_0_512_512.png
│ │ └── seg.png
│ ├── test_apis
│ │ └── test_single_gpu.py
│ ├── test_config.py
│ ├── test_core
│ │ ├── test_layer_decay_optimizer_constructor.py
│ │ └── test_optimizer.py
│ ├── test_data
│ │ ├── test_dataset.py
│ │ ├── test_dataset_builder.py
│ │ ├── test_loading.py
│ │ ├── test_transform.py
│ │ └── test_tta.py
│ ├── test_digit_version.py
│ ├── test_eval_hook.py
│ ├── test_inference.py
│ ├── test_metrics.py
│ ├── test_models
│ │ ├── __init__.py
│ │ ├── test_backbones
│ │ │ ├── __init__.py
│ │ │ ├── test_beit.py
│ │ │ ├── test_bisenetv1.py
│ │ │ ├── test_bisenetv2.py
│ │ │ ├── test_blocks.py
│ │ │ ├── test_cgnet.py
│ │ │ ├── test_erfnet.py
│ │ │ ├── test_fast_scnn.py
│ │ │ ├── test_hrnet.py
│ │ │ ├── test_icnet.py
│ │ │ ├── test_mae.py
│ │ │ ├── test_mit.py
│ │ │ ├── test_mobilenet_v3.py
│ │ │ ├── test_resnest.py
│ │ │ ├── test_resnet.py
│ │ │ ├── test_resnext.py
│ │ │ ├── test_stdc.py
│ │ │ ├── test_swin.py
│ │ │ ├── test_timm_backbone.py
│ │ │ ├── test_twins.py
│ │ │ ├── test_unet.py
│ │ │ ├── test_vit.py
│ │ │ └── utils.py
│ │ ├── test_forward.py
│ │ ├── test_heads
│ │ │ ├── __init__.py
│ │ │ ├── test_ann_head.py
│ │ │ ├── test_apc_head.py
│ │ │ ├── test_aspp_head.py
│ │ │ ├── test_cc_head.py
│ │ │ ├── test_da_head.py
│ │ │ ├── test_decode_head.py
│ │ │ ├── test_dm_head.py
│ │ │ ├── test_dnl_head.py
│ │ │ ├── test_dpt_head.py
│ │ │ ├── test_ema_head.py
│ │ │ ├── test_enc_head.py
│ │ │ ├── test_fcn_head.py
│ │ │ ├── test_gc_head.py
│ │ │ ├── test_isa_head.py
│ │ │ ├── test_knet_head.py
│ │ │ ├── test_lraspp_head.py
│ │ │ ├── test_nl_head.py
│ │ │ ├── test_ocr_head.py
│ │ │ ├── test_point_head.py
│ │ │ ├── test_psa_head.py
│ │ │ ├── test_psp_head.py
│ │ │ ├── test_segformer_head.py
│ │ │ ├── test_segmenter_mask_head.py
│ │ │ ├── test_setr_mla_head.py
│ │ │ ├── test_setr_up_head.py
│ │ │ ├── test_stdc_head.py
│ │ │ ├── test_uper_head.py
│ │ │ └── utils.py
│ │ ├── test_losses
│ │ │ ├── __init__.py
│ │ │ ├── test_ce_loss.py
│ │ │ ├── test_dice_loss.py
│ │ │ ├── test_focal_loss.py
│ │ │ ├── test_lovasz_loss.py
│ │ │ ├── test_tversky_loss.py
│ │ │ └── test_utils.py
│ │ ├── test_necks
│ │ │ ├── __init__.py
│ │ │ ├── test_feature2pyramid.py
│ │ │ ├── test_fpn.py
│ │ │ ├── test_ic_neck.py
│ │ │ ├── test_jpu.py
│ │ │ ├── test_mla_neck.py
│ │ │ └── test_multilevel_neck.py
│ │ ├── test_segmentors
│ │ │ ├── __init__.py
│ │ │ ├── test_cascade_encoder_decoder.py
│ │ │ ├── test_encoder_decoder.py
│ │ │ └── utils.py
│ │ └── test_utils
│ │ │ ├── __init__.py
│ │ │ ├── test_embed.py
│ │ │ └── test_shape_convert.py
│ ├── test_sampler.py
│ └── test_utils
│ │ ├── test_misc.py
│ │ ├── test_set_env.py
│ │ └── test_util_distribution.py
└── tools
│ ├── analyze_logs.py
│ ├── benchmark.py
│ ├── browse_dataset.py
│ ├── confusion_matrix.py
│ ├── convert_datasets
│ ├── chase_db1.py
│ ├── cityscapes.py
│ ├── coco_stuff10k.py
│ ├── coco_stuff164k.py
│ ├── drive.py
│ ├── hrf.py
│ ├── isaid.py
│ ├── loveda.py
│ ├── pascal_context.py
│ ├── potsdam.py
│ ├── stare.py
│ ├── vaihingen.py
│ └── voc_aug.py
│ ├── deploy_test.py
│ ├── dist_test.sh
│ ├── dist_train.sh
│ ├── get_flops.py
│ ├── model_converters
│ ├── beit2mmseg.py
│ ├── mit2mmseg.py
│ ├── stdc2mmseg.py
│ ├── swin2mmseg.py
│ ├── twins2mmseg.py
│ ├── vit2mmseg.py
│ └── vitjax2mmseg.py
│ ├── onnx2tensorrt.py
│ ├── print_config.py
│ ├── publish_model.py
│ ├── pytorch2onnx.py
│ ├── pytorch2torchscript.py
│ ├── slurm_test.sh
│ ├── slurm_train.sh
│ ├── test.py
│ ├── torchserve
│ ├── mmseg2torchserve.py
│ ├── mmseg_handler.py
│ └── test_torchserve.py
│ └── train.py
├── models_mae.py
├── models_vit.py
├── requirement_pip_install.sh
└── util
├── datasets.py
├── hog_layer.py
├── lr_decay.py
├── lr_sched.py
├── misc.py
└── pos_embed.py
/figs/fastmim.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/figs/fastmim.png
--------------------------------------------------------------------------------
/mmdet-v2.23/build_environment.sh:
--------------------------------------------------------------------------------
1 | pip install --upgrade pip;
2 | pip install torch==1.7.0 torchvision==0.8.1;
3 | pip install timm==0.3.2;
4 | pip install einops;
5 | pip install torchprofile;
6 | pip install tensorboardX;
7 | pip install cupy-cuda100;
8 | pip install addict;
9 | pip install yapf;
10 | pip install cython;
11 | pip install scikit-build;
12 | pip install opencv-python;
13 | pip install pytest-runner;
14 | pip install terminaltables==3.1.0;
15 | pip install mmpycocotools;
--------------------------------------------------------------------------------
/mmdet-v2.23/configs/_base_/datasets/lvis_v0.5_instance.py:
--------------------------------------------------------------------------------
1 | # dataset settings
2 | _base_ = 'coco_instance.py'
3 | dataset_type = 'LVISV05Dataset'
4 | data_root = 'data/lvis_v0.5/'
5 | data = dict(
6 |     samples_per_gpu=2,
7 |     workers_per_gpu=2,
8 |     train=dict(
9 |         _delete_=True,
10 |         type='ClassBalancedDataset',
11 |         oversample_thr=1e-3,
12 |         dataset=dict(
13 |             type=dataset_type,
14 |             ann_file=data_root + 'annotations/lvis_v0.5_train.json',
15 |             img_prefix=data_root + 'train2017/')),
16 |     val=dict(
17 |         type=dataset_type,
18 |         ann_file=data_root + 'annotations/lvis_v0.5_val.json',
19 |         img_prefix=data_root + 'val2017/'),
20 |     test=dict(
21 |         type=dataset_type,
22 |         ann_file=data_root + 'annotations/lvis_v0.5_val.json',
23 |         img_prefix=data_root + 'val2017/'))
24 | evaluation = dict(metric=['bbox', 'segm'])
25 |
--------------------------------------------------------------------------------
/mmdet-v2.23/configs/_base_/datasets/lvis_v1_instance.py:
--------------------------------------------------------------------------------
1 | # dataset settings
2 | _base_ = 'coco_instance.py'
3 | dataset_type = 'LVISV1Dataset'
4 | data_root = 'data/lvis_v1/'
5 | data = dict(
6 |     samples_per_gpu=2,
7 |     workers_per_gpu=2,
8 |     train=dict(
9 |         _delete_=True,
10 |         type='ClassBalancedDataset',
11 |         oversample_thr=1e-3,
12 |         dataset=dict(
13 |             type=dataset_type,
14 |             ann_file=data_root + 'annotations/lvis_v1_train.json',
15 |             img_prefix=data_root)),
16 |     val=dict(
17 |         type=dataset_type,
18 |         ann_file=data_root + 'annotations/lvis_v1_val.json',
19 |         img_prefix=data_root),
20 |     test=dict(
21 |         type=dataset_type,
22 |         ann_file=data_root + 'annotations/lvis_v1_val.json',
23 |         img_prefix=data_root))
24 | evaluation = dict(metric=['bbox', 'segm'])
25 |
--------------------------------------------------------------------------------
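Both LVIS configs above lean on MMDetection's `_base_` inheritance: the child file is merged on top of `coco_instance.py`, and `_delete_=True` makes the `train` entry replace the inherited one rather than merge into it. A minimal sketch of inspecting the merged result with mmcv's `Config` (assuming the mmdet-v2.23 directory is the working directory; the printed values are illustrative):

from mmcv import Config

# mmcv resolves the `_base_` chain and applies `_delete_=True`
# before returning the merged, attribute-accessible Config.
cfg = Config.fromfile('configs/_base_/datasets/lvis_v1_instance.py')

print(cfg.dataset_type)             # 'LVISV1Dataset'
print(cfg.data.train.type)          # 'ClassBalancedDataset' (inherited COCO train dict was dropped)
print(cfg.data.train.dataset.ann_file)
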
/mmdet-v2.23/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | checkpoint_config = dict(interval=1)
2 | # yapf:disable
3 | log_config = dict(
4 |     interval=50,
5 |     hooks=[
6 |         dict(type='TextLoggerHook'),
7 |         # dict(type='TensorboardLoggerHook')
8 |     ])
9 | # yapf:enable
10 | custom_hooks = [dict(type='NumClassCheckHook')]
11 |
12 | dist_params = dict(backend='nccl')
13 | log_level = 'INFO'
14 | load_from = None
15 | resume_from = None
16 | workflow = [('train', 1)]
17 |
18 | # disable opencv multithreading to avoid system being overloaded
19 | opencv_num_threads = 0
20 | # set multi-process start method as `fork` to speed up the training
21 | mp_start_method = 'fork'
22 |
--------------------------------------------------------------------------------
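Downstream configs usually inherit this runtime file and override single entries. A hedged sketch of a derived config that enables the commented-out TensorBoard hook (the `_base_` path is relative to wherever the derived config lives):

_base_ = ['../_base_/default_runtime.py']

# Only log_config is overridden; every other runtime setting is inherited.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
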
/mmdet-v2.23/configs/_base_/schedules/schedule_1x.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
3 | optimizer_config = dict(grad_clip=None)
4 | # learning policy
5 | lr_config = dict(
6 |     policy='step',
7 |     warmup='linear',
8 |     warmup_iters=500,
9 |     warmup_ratio=0.001,
10 |     step=[8, 11])
11 | runner = dict(type='EpochBasedRunner', max_epochs=12)
12 |
--------------------------------------------------------------------------------
/mmdet-v2.23/configs/_base_/schedules/schedule_20e.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
3 | optimizer_config = dict(grad_clip=None)
4 | # learning policy
5 | lr_config = dict(
6 |     policy='step',
7 |     warmup='linear',
8 |     warmup_iters=500,
9 |     warmup_ratio=0.001,
10 |     step=[16, 19])
11 | runner = dict(type='EpochBasedRunner', max_epochs=20)
12 |
--------------------------------------------------------------------------------
/mmdet-v2.23/configs/_base_/schedules/schedule_2x.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
3 | optimizer_config = dict(grad_clip=None)
4 | # learning policy
5 | lr_config = dict(
6 |     policy='step',
7 |     warmup='linear',
8 |     warmup_iters=500,
9 |     warmup_ratio=0.001,
10 |     step=[16, 22])
11 | runner = dict(type='EpochBasedRunner', max_epochs=24)
12 |
--------------------------------------------------------------------------------
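The three schedules follow MMDetection's naming convention: 1x is 12 epochs with the learning rate dropped at epochs 8 and 11, 2x is 24 epochs with drops at 16 and 22, and 20e is 20 epochs with drops at 16 and 19; in each case the step policy multiplies the learning rate by 0.1 at every listed epoch after a 500-iteration linear warmup. A minimal sketch of the resulting per-epoch learning rate for schedule_1x, ignoring warmup (the helper below is illustrative, not part of MMDetection):

def lr_at_epoch(epoch, base_lr=0.02, steps=(8, 11), gamma=0.1):
    """Step policy: decay by `gamma` at every epoch listed in `steps`."""
    return base_lr * gamma ** sum(epoch >= s for s in steps)

# schedule_1x: 0.02 for epochs 0-7, 0.002 for epochs 8-10, 0.0002 for epoch 11
print([round(lr_at_epoch(e), 5) for e in range(12)])
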
/mmdet-v2.23/docs/en/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmdet-logo.png");
3 | background-size: 156px 40px;
4 | height: 40px;
5 | width: 156px;
6 | }
7 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/_static/image/mmdet-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/docs/en/_static/image/mmdet-logo.png
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMDetection's documentation!
2 | =======================================
3 |
4 | .. toctree::
5 |    :maxdepth: 2
6 |    :caption: Get Started
7 |
8 |    get_started.md
9 |    modelzoo_statistics.md
10 |    model_zoo.md
11 |
12 | .. toctree::
13 |    :maxdepth: 2
14 |    :caption: Quick Run
15 |
16 |    1_exist_data_model.md
17 |    2_new_data_model.md
18 |    3_exist_data_new_model.md
19 |
20 | .. toctree::
21 |    :maxdepth: 2
22 |    :caption: Tutorials
23 |
24 |    tutorials/index.rst
25 |
26 | .. toctree::
27 |    :maxdepth: 2
28 |    :caption: Useful Tools and Scripts
29 |
30 |    useful_tools.md
31 |
32 | .. toctree::
33 |    :maxdepth: 2
34 |    :caption: Notes
35 |
36 |    conventions.md
37 |    compatibility.md
38 |    projects.md
39 |    changelog.md
40 |    faq.md
41 |
42 | .. toctree::
43 |    :caption: Switch Language
44 |
45 |    switch_language.md
46 |
47 | .. toctree::
48 |    :maxdepth: 1
49 |    :caption: API Reference
50 |
51 |    api.rst
52 |
53 | Indices and tables
54 | ==================
55 |
56 | * :ref:`genindex`
57 | * :ref:`search`
58 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/en/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 |    :maxdepth: 2
3 |
4 |    config.md
5 |    customize_dataset.md
6 |    data_pipeline.md
7 |    customize_models.md
8 |    customize_runtime.md
9 |    customize_losses.md
10 |    finetune.md
11 |    robustness_benchmarking.md
12 |    pytorch2onnx.md
13 |    onnx2tensorrt.md
14 |    init_cfg.md
15 |    how_to.md
16 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmdet-logo.png");
3 | background-size: 156px 40px;
4 | height: 40px;
5 | width: 156px;
6 | }
7 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/_static/image/mmdet-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/docs/zh_cn/_static/image/mmdet-logo.png
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMDetection's documentation!
2 | =======================================
3 |
4 | .. toctree::
5 |    :maxdepth: 2
6 |    :caption: 开始你的第一步
7 |
8 |    get_started.md
9 |    model_zoo.md
10 |    article.md
11 |
12 | .. toctree::
13 |    :maxdepth: 2
14 |    :caption: 快速启动
15 |
16 |    1_exist_data_model.md
17 |    2_new_data_model.md
18 |
19 | .. toctree::
20 |    :maxdepth: 2
21 |    :caption: 教程
22 |
23 |    tutorials/index.rst
24 |
25 | .. toctree::
26 |    :maxdepth: 2
27 |    :caption: 实用工具与脚本
28 |
29 |    useful_tools.md
30 |
31 | .. toctree::
32 |    :maxdepth: 2
33 |    :caption: 说明
34 |
35 |    conventions.md
36 |    compatibility.md
37 |    faq.md
38 |
39 | .. toctree::
40 |    :caption: 语言切换
41 |
42 |    switch_language.md
43 |
44 | .. toctree::
45 |    :maxdepth: 1
46 |    :caption: 接口文档(英文)
47 |
48 |    api.rst
49 |
50 |
51 | Indices and tables
52 | ==================
53 |
54 | * :ref:`genindex`
55 | * :ref:`search`
56 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/tutorials/customize_runtime.md:
--------------------------------------------------------------------------------
1 | # 教程 5: 自定义训练配置
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 |    :maxdepth: 2
3 |
4 |    config.md
5 |    customize_dataset.md
6 |    data_pipeline.md
7 |    customize_models.md
8 |    customize_runtime.md
9 |    customize_losses.md
10 |    finetune.md
11 |    pytorch2onnx.md
12 |    onnx2tensorrt.md
13 |    init_cfg.md
14 |    how_to.md
15 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/tutorials/pytorch2onnx.md:
--------------------------------------------------------------------------------
1 | # 教程 8: Pytorch 到 ONNX 的模型转换(实验性支持)
2 |
3 |
4 | > ## [尝试使用新的 MMDeploy 來部署你的模型](https://mmdeploy.readthedocs.io/)
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/docs/zh_cn/useful_tools.md:
--------------------------------------------------------------------------------
1 | ## 日志分析
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import mmcv
3 |
4 | from .version import __version__, short_version
5 |
6 |
7 | def digit_version(version_str):
8 |     digit_version = []
9 |     for x in version_str.split('.'):
10 |         if x.isdigit():
11 |             digit_version.append(int(x))
12 |         elif x.find('rc') != -1:
13 |             patch_version = x.split('rc')
14 |             digit_version.append(int(patch_version[0]) - 1)
15 |             digit_version.append(int(patch_version[1]))
16 |     return digit_version
17 |
18 |
19 | mmcv_minimum_version = '1.3.17'
20 | mmcv_maximum_version = '1.5.0'
21 | mmcv_version = digit_version(mmcv.__version__)
22 |
23 |
24 | assert (mmcv_version >= digit_version(mmcv_minimum_version)
25 | and mmcv_version <= digit_version(mmcv_maximum_version)), \
26 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \
27 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
28 |
29 | __all__ = ['__version__', 'short_version']
30 |
--------------------------------------------------------------------------------
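The version guard above compares the lists returned by `digit_version`; a release-candidate suffix is mapped just below its final release so that, for example, 1.5.0rc1 still passes the `<= 1.5.0` check. A short illustration whose outputs follow directly from the function as written:

print(digit_version('1.3.17'))     # [1, 3, 17]
print(digit_version('1.5.0'))      # [1, 5, 0]
print(digit_version('1.5.0rc1'))   # [1, 5, -1, 1] -- sorts below the final 1.5.0
print(digit_version('1.5.0rc1') <= digit_version('1.5.0'))   # True
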
/mmdet-v2.23/mmdet/apis/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .inference import (async_inference_detector, inference_detector,
3 | init_detector, show_result_pyplot)
4 | from .test import multi_gpu_test, single_gpu_test
5 | from .train import (get_root_logger, init_random_seed, set_random_seed,
6 | train_detector)
7 |
8 | __all__ = [
9 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
10 | 'async_inference_detector', 'inference_detector', 'show_result_pyplot',
11 | 'multi_gpu_test', 'single_gpu_test', 'init_random_seed'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .anchor import * # noqa: F401, F403
3 | from .bbox import * # noqa: F401, F403
4 | from .data_structures import * # noqa: F401, F403
5 | from .evaluation import * # noqa: F401, F403
6 | from .hook import * # noqa: F401, F403
7 | from .mask import * # noqa: F401, F403
8 | from .post_processing import * # noqa: F401, F403
9 | from .utils import * # noqa: F401, F403
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/anchor/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
3 | YOLOAnchorGenerator)
4 | from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
5 | build_anchor_generator, build_prior_generator)
6 | from .point_generator import MlvlPointGenerator, PointGenerator
7 | from .utils import anchor_inside_flags, calc_region, images_to_levels
8 |
9 | __all__ = [
10 | 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
11 | 'PointGenerator', 'images_to_levels', 'calc_region',
12 | 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator',
13 | 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/anchor/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import warnings
3 |
4 | from mmcv.utils import Registry, build_from_cfg
5 |
6 | PRIOR_GENERATORS = Registry('Generator for anchors and points')
7 |
8 | ANCHOR_GENERATORS = PRIOR_GENERATORS
9 |
10 |
11 | def build_prior_generator(cfg, default_args=None):
12 |     return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
13 |
14 |
15 | def build_anchor_generator(cfg, default_args=None):
16 |     warnings.warn(
17 |         '``build_anchor_generator`` would be deprecated soon, please use '
18 |         '``build_prior_generator`` ')
19 |     return build_prior_generator(cfg, default_args=default_args)
20 |
--------------------------------------------------------------------------------
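`PRIOR_GENERATORS` is a plain mmcv `Registry`, so generators are added with the decorator and instantiated from a config dict whose `type` names the registered class. A minimal sketch with a hypothetical `DummyPriorGenerator` (not part of the codebase):

from mmdet.core.anchor.builder import PRIOR_GENERATORS, build_prior_generator


@PRIOR_GENERATORS.register_module()
class DummyPriorGenerator:
    """Toy generator used only to illustrate the registry round-trip."""

    def __init__(self, strides=(8, 16, 32)):
        self.strides = strides


gen = build_prior_generator(dict(type='DummyPriorGenerator', strides=(4, 8)))
assert isinstance(gen, DummyPriorGenerator) and gen.strides == (4, 8)
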
/mmdet-v2.23/mmdet/core/bbox/assigners/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner
3 | from .assign_result import AssignResult
4 | from .atss_assigner import ATSSAssigner
5 | from .base_assigner import BaseAssigner
6 | from .center_region_assigner import CenterRegionAssigner
7 | from .grid_assigner import GridAssigner
8 | from .hungarian_assigner import HungarianAssigner
9 | from .mask_hungarian_assigner import MaskHungarianAssigner
10 | from .max_iou_assigner import MaxIoUAssigner
11 | from .point_assigner import PointAssigner
12 | from .region_assigner import RegionAssigner
13 | from .sim_ota_assigner import SimOTAAssigner
14 | from .task_aligned_assigner import TaskAlignedAssigner
15 | from .uniform_assigner import UniformAssigner
16 |
17 | __all__ = [
18 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
19 | 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
20 | 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
21 | 'TaskAlignedAssigner', 'MaskHungarianAssigner'
22 | ]
23 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/assigners/base_assigner.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from abc import ABCMeta, abstractmethod
3 |
4 |
5 | class BaseAssigner(metaclass=ABCMeta):
6 | """Base assigner that assigns boxes to ground truth boxes."""
7 |
8 | @abstractmethod
9 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
10 | """Assign boxes to either a ground truth boxes or a negative boxes."""
11 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import Registry, build_from_cfg
3 |
4 | BBOX_ASSIGNERS = Registry('bbox_assigner')
5 | BBOX_SAMPLERS = Registry('bbox_sampler')
6 | BBOX_CODERS = Registry('bbox_coder')
7 |
8 |
9 | def build_assigner(cfg, **default_args):
10 | """Builder of box assigner."""
11 | return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
12 |
13 |
14 | def build_sampler(cfg, **default_args):
15 | """Builder of box sampler."""
16 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
17 |
18 |
19 | def build_bbox_coder(cfg, **default_args):
20 | """Builder of box coder."""
21 | return build_from_cfg(cfg, BBOX_CODERS, default_args)
22 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/coder/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_bbox_coder import BaseBBoxCoder
3 | from .bucketing_bbox_coder import BucketingBBoxCoder
4 | from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
5 | from .distance_point_bbox_coder import DistancePointBBoxCoder
6 | from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
7 | from .pseudo_bbox_coder import PseudoBBoxCoder
8 | from .tblr_bbox_coder import TBLRBBoxCoder
9 | from .yolo_bbox_coder import YOLOBBoxCoder
10 |
11 | __all__ = [
12 | 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
13 | 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
14 | 'BucketingBBoxCoder', 'DistancePointBBoxCoder'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/coder/base_bbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from abc import ABCMeta, abstractmethod
3 |
4 |
5 | class BaseBBoxCoder(metaclass=ABCMeta):
6 | """Base bounding box coder."""
7 |
8 | def __init__(self, **kwargs):
9 | pass
10 |
11 | @abstractmethod
12 | def encode(self, bboxes, gt_bboxes):
13 | """Encode deltas between bboxes and ground truth boxes."""
14 |
15 | @abstractmethod
16 | def decode(self, bboxes, bboxes_pred):
17 | """Decode the predicted bboxes according to prediction and base
18 | boxes."""
19 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/coder/pseudo_bbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import BBOX_CODERS
3 | from .base_bbox_coder import BaseBBoxCoder
4 |
5 |
6 | @BBOX_CODERS.register_module()
7 | class PseudoBBoxCoder(BaseBBoxCoder):
8 | """Pseudo bounding box coder."""
9 |
10 | def __init__(self, **kwargs):
11 | super(BaseBBoxCoder, self).__init__(**kwargs)
12 |
13 | def encode(self, bboxes, gt_bboxes):
14 | """torch.Tensor: return the given ``bboxes``"""
15 | return gt_bboxes
16 |
17 | def decode(self, bboxes, pred_bboxes):
18 | """torch.Tensor: return the given ``pred_bboxes``"""
19 | return pred_bboxes
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/demodata.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import numpy as np
3 | import torch
4 |
5 | from mmdet.utils.util_random import ensure_rng
6 |
7 |
8 | def random_boxes(num=1, scale=1, rng=None):
9 | """Simple version of ``kwimage.Boxes.random``
10 |
11 | Returns:
12 | Tensor: shape (n, 4) in x1, y1, x2, y2 format.
13 |
14 | References:
15 | https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
16 |
17 | Example:
18 | >>> num = 3
19 | >>> scale = 512
20 | >>> rng = 0
21 | >>> boxes = random_boxes(num, scale, rng)
22 | >>> print(boxes)
23 | tensor([[280.9925, 278.9802, 308.6148, 366.1769],
24 | [216.9113, 330.6978, 224.0446, 456.5878],
25 | [405.3632, 196.3221, 493.3953, 270.7942]])
26 | """
27 | rng = ensure_rng(rng)
28 |
29 | tlbr = rng.rand(num, 4).astype(np.float32)
30 |
31 | tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
32 | tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
33 | br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
34 | br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
35 |
36 | tlbr[:, 0] = tl_x * scale
37 | tlbr[:, 1] = tl_y * scale
38 | tlbr[:, 2] = br_x * scale
39 | tlbr[:, 3] = br_y * scale
40 |
41 | boxes = torch.from_numpy(tlbr)
42 | return boxes
43 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/iou_calculators/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import build_iou_calculator
3 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
4 |
5 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
6 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/iou_calculators/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import Registry, build_from_cfg
3 |
4 | IOU_CALCULATORS = Registry('IoU calculator')
5 |
6 |
7 | def build_iou_calculator(cfg, default_args=None):
8 | """Builder of IoU calculator."""
9 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
10 |
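For illustration, a short sketch of using this builder with the BboxOverlaps2D calculator exported above (the box values are only an example):

    import torch
    from mmdet.core.bbox.iou_calculators import build_iou_calculator

    iou_calc = build_iou_calculator(dict(type='BboxOverlaps2D'))
    boxes_a = torch.tensor([[0., 0., 10., 10.]])
    boxes_b = torch.tensor([[5., 5., 15., 15.]])
    # Pairwise IoU matrix of shape (1, 1); the overlap here is 25 / 175 ~= 0.143.
    print(iou_calc(boxes_a, boxes_b))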
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/match_costs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import build_match_cost
3 | from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
4 | DiceCost, FocalLossCost, IoUCost)
5 |
6 | __all__ = [
7 | 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
8 | 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/match_costs/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import Registry, build_from_cfg
3 |
4 | MATCH_COST = Registry('Match Cost')
5 |
6 |
7 | def build_match_cost(cfg, default_args=None):
8 |     """Builder of matching cost."""
9 | return build_from_cfg(cfg, MATCH_COST, default_args)
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/samplers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_sampler import BaseSampler
3 | from .combined_sampler import CombinedSampler
4 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
5 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler
6 | from .mask_pseudo_sampler import MaskPseudoSampler
7 | from .mask_sampling_result import MaskSamplingResult
8 | from .ohem_sampler import OHEMSampler
9 | from .pseudo_sampler import PseudoSampler
10 | from .random_sampler import RandomSampler
11 | from .sampling_result import SamplingResult
12 | from .score_hlr_sampler import ScoreHLRSampler
13 |
14 | __all__ = [
15 | 'BaseSampler', 'PseudoSampler', 'RandomSampler',
16 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
17 | 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
18 | 'MaskSamplingResult'
19 | ]
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/bbox/samplers/combined_sampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import BBOX_SAMPLERS, build_sampler
3 | from .base_sampler import BaseSampler
4 |
5 |
6 | @BBOX_SAMPLERS.register_module()
7 | class CombinedSampler(BaseSampler):
8 | """A sampler that combines positive sampler and negative sampler."""
9 |
10 | def __init__(self, pos_sampler, neg_sampler, **kwargs):
11 | super(CombinedSampler, self).__init__(**kwargs)
12 | self.pos_sampler = build_sampler(pos_sampler, **kwargs)
13 | self.neg_sampler = build_sampler(neg_sampler, **kwargs)
14 |
15 | def _sample_pos(self, **kwargs):
16 | """Sample positive samples."""
17 | raise NotImplementedError
18 |
19 | def _sample_neg(self, **kwargs):
20 | """Sample negative samples."""
21 | raise NotImplementedError
22 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/data_structures/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .general_data import GeneralData
3 | from .instance_data import InstanceData
4 |
5 | __all__ = ['GeneralData', 'InstanceData']
6 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
3 | get_classes, imagenet_det_classes,
4 | imagenet_vid_classes, oid_challenge_classes,
5 | oid_v6_classes, voc_classes)
6 | from .eval_hooks import DistEvalHook, EvalHook
7 | from .mean_ap import average_precision, eval_map, print_map_summary
8 | from .panoptic_utils import INSTANCE_OFFSET
9 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
10 | print_recall_summary)
11 |
12 | __all__ = [
13 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
14 | 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
15 | 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
16 | 'print_map_summary', 'eval_recalls', 'print_recall_summary',
17 | 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes',
18 | 'oid_challenge_classes', 'INSTANCE_OFFSET'
19 | ]
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/evaluation/panoptic_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | # A custom value to distinguish instance ID and category ID; it needs to
3 | # be greater than the number of categories.
4 | # For a pixel in the panoptic result map:
5 | # pan_id = ins_id * INSTANCE_OFFSET + cat_id
6 | INSTANCE_OFFSET = 1000
7 |
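A small sketch of how this encoding round-trips; encode_pan_id and decode_pan_id are hypothetical helpers written for illustration, not part of mmdet:

    INSTANCE_OFFSET = 1000

    def encode_pan_id(ins_id, cat_id):
        # pan_id = ins_id * INSTANCE_OFFSET + cat_id, as documented above.
        return ins_id * INSTANCE_OFFSET + cat_id

    def decode_pan_id(pan_id):
        # Invert the encoding back into (instance ID, category ID).
        return pan_id // INSTANCE_OFFSET, pan_id % INSTANCE_OFFSET

    assert decode_pan_id(encode_pan_id(3, 17)) == (3, 17)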
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/export/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,
3 | get_k_for_topk)
4 | from .pytorch2onnx import (build_model_from_cfg,
5 | generate_inputs_and_wrap_model,
6 | preprocess_example_input)
7 |
8 | __all__ = [
9 | 'build_model_from_cfg', 'generate_inputs_and_wrap_model',
10 | 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx',
11 | 'dynamic_clip_for_onnx'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/hook/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .checkloss_hook import CheckInvalidLossHook
3 | from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
4 | from .set_epoch_info_hook import SetEpochInfoHook
5 | from .sync_norm_hook import SyncNormHook
6 | from .sync_random_size_hook import SyncRandomSizeHook
7 | from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
8 | from .yolox_mode_switch_hook import YOLOXModeSwitchHook
9 |
10 | __all__ = [
11 | 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
12 | 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
13 | 'CheckInvalidLossHook', 'SetEpochInfoHook'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/hook/checkloss_hook.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 | from mmcv.runner.hooks import HOOKS, Hook
4 |
5 |
6 | @HOOKS.register_module()
7 | class CheckInvalidLossHook(Hook):
8 | """Check invalid loss hook.
9 |
10 | This hook will regularly check whether the loss is valid
11 | during training.
12 |
13 | Args:
14 | interval (int): Checking interval (every k iterations).
15 | Default: 50.
16 | """
17 |
18 | def __init__(self, interval=50):
19 | self.interval = interval
20 |
21 | def after_train_iter(self, runner):
22 | if self.every_n_iters(runner, self.interval):
23 | assert torch.isfinite(runner.outputs['loss']), \
24 |                 runner.logger.info('loss becomes infinite or NaN!')
25 |
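A typical way to enable this hook from a training config (a sketch of standard mmdet custom-hook usage, not taken from this repo's configs):

    # The hook is looked up in the HOOKS registry by its 'type' name.
    custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]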
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/hook/set_epoch_info_hook.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.parallel import is_module_wrapper
3 | from mmcv.runner import HOOKS, Hook
4 |
5 |
6 | @HOOKS.register_module()
7 | class SetEpochInfoHook(Hook):
8 | """Set runner's epoch information to the model."""
9 |
10 | def before_train_epoch(self, runner):
11 | epoch = runner.epoch
12 | model = runner.model
13 | if is_module_wrapper(model):
14 | model = model.module
15 | model.set_epoch(epoch)
16 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/mask/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .mask_target import mask_target
3 | from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
4 | from .utils import encode_mask_results, mask2bbox, split_combined_polys
5 |
6 | __all__ = [
7 | 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
8 | 'PolygonMasks', 'encode_mask_results', 'mask2bbox'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/post_processing/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bbox_nms import fast_nms, multiclass_nms
3 | from .matrix_nms import mask_matrix_nms
4 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
5 | merge_aug_proposals, merge_aug_scores)
6 |
7 | __all__ = [
8 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
9 | 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms'
10 | ]
11 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
3 | reduce_mean, sync_random_seed)
4 | from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
5 | generate_coordinate, mask2ndarray, multi_apply,
6 | select_single_mlvl, unmap)
7 |
8 | __all__ = [
9 | 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
10 | 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
11 | 'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
12 | 'filter_scores_and_topk', 'sync_random_seed'
13 | ]
14 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/core/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .image import (color_val_matplotlib, imshow_det_bboxes,
3 | imshow_gt_det_bboxes)
4 | from .palette import get_palette, palette_val
5 |
6 | __all__ = [
7 | 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib',
8 | 'palette_val', 'get_palette'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/datasets/api_wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coco_api import COCO, COCOeval
3 | from .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core
4 |
5 | __all__ = [
6 | 'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core'
7 | ]
8 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/datasets/deepfashion.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import DATASETS
3 | from .coco import CocoDataset
4 |
5 |
6 | @DATASETS.register_module()
7 | class DeepFashionDataset(CocoDataset):
8 |
9 | CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
10 | 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
11 | 'skin', 'face')
12 |
13 | PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
14 | (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
15 | (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
16 | (128, 0, 96), (128, 0, 192), (0, 32, 192)]
17 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/datasets/pipelines/formating.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | # flake8: noqa
3 | import warnings
4 |
5 | from .formatting import *
6 |
7 | warnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be '
8 | 'deprecated, please replace it with '
9 | 'mmdet.datasets.pipelines.formatting.')
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/datasets/samplers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .distributed_sampler import DistributedSampler
3 | from .group_sampler import DistributedGroupSampler, GroupSampler
4 | from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
5 |
6 | __all__ = [
7 | 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
8 | 'InfiniteGroupBatchSampler', 'InfiniteBatchSampler'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .backbones import * # noqa: F401,F403
3 | from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
4 | ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
5 | build_detector, build_head, build_loss, build_neck,
6 | build_roi_extractor, build_shared_head)
7 | from .dense_heads import * # noqa: F401,F403
8 | from .detectors import * # noqa: F401,F403
9 | from .losses import * # noqa: F401,F403
10 | from .necks import * # noqa: F401,F403
11 | from .plugins import * # noqa: F401,F403
12 | from .roi_heads import * # noqa: F401,F403
13 | from .seg_heads import * # noqa: F401,F403
14 |
15 | __all__ = [
16 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
17 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
18 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector'
19 | ]
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .csp_darknet import CSPDarknet
3 | from .darknet import Darknet
4 | from .detectors_resnet import DetectoRS_ResNet
5 | from .detectors_resnext import DetectoRS_ResNeXt
6 | from .efficientnet import EfficientNet
7 | from .hourglass import HourglassNet
8 | from .hrnet import HRNet
9 | from .mobilenet_v2 import MobileNetV2
10 | from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
11 | from .regnet import RegNet
12 | from .res2net import Res2Net
13 | from .resnest import ResNeSt
14 | from .resnet import ResNet, ResNetV1d
15 | from .resnext import ResNeXt
16 | from .ssd_vgg import SSDVGG
17 | from .swin import SwinTransformer
18 | from .trident_resnet import TridentResNet
19 | from .wave_dwconv import WaveDWNet
20 | from .mix_regnetg import RegNetG
21 | from .vit import VisionTransformer
22 |
23 | __all__ = [
24 | 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
25 | 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
26 | 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
27 | 'SwinTransformer', 'PyramidVisionTransformer',
28 | 'PyramidVisionTransformerV2', 'EfficientNet', 'WaveDWNet', 'RegNetG',
29 | 'VisionTransformer',
30 | ]
31 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/atss.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class ATSS(SingleStageDetector):
8 |     """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/autoassign.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class AutoAssign(SingleStageDetector):
8 | """Implementation of `AutoAssign: Differentiable Label Assignment for Dense
9 |     Object Detection <https://arxiv.org/abs/2007.03496>`_."""
10 |
11 | def __init__(self,
12 | backbone,
13 | neck,
14 | bbox_head,
15 | train_cfg=None,
16 | test_cfg=None,
17 | pretrained=None):
18 | super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/deformable_detr.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .detr import DETR
4 |
5 |
6 | @DETECTORS.register_module()
7 | class DeformableDETR(DETR):
8 |
9 | def __init__(self, *args, **kwargs):
10 | super(DETR, self).__init__(*args, **kwargs)
11 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/faster_rcnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .two_stage import TwoStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class FasterRCNN(TwoStageDetector):
8 |     """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | rpn_head,
13 | roi_head,
14 | train_cfg,
15 | test_cfg,
16 | neck=None,
17 | pretrained=None,
18 | init_cfg=None):
19 | super(FasterRCNN, self).__init__(
20 | backbone=backbone,
21 | neck=neck,
22 | rpn_head=rpn_head,
23 | roi_head=roi_head,
24 | train_cfg=train_cfg,
25 | test_cfg=test_cfg,
26 | pretrained=pretrained,
27 | init_cfg=init_cfg)
28 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/fcos.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class FCOS(SingleStageDetector):
8 |     """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/fovea.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class FOVEA(SingleStageDetector):
8 |     """Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/fsaf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class FSAF(SingleStageDetector):
8 |     """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/gfl.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class GFL(SingleStageDetector):
8 |
9 | def __init__(self,
10 | backbone,
11 | neck,
12 | bbox_head,
13 | train_cfg=None,
14 | test_cfg=None,
15 | pretrained=None,
16 | init_cfg=None):
17 | super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
18 | test_cfg, pretrained, init_cfg)
19 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/grid_rcnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .two_stage import TwoStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class GridRCNN(TwoStageDetector):
8 | """Grid R-CNN.
9 |
10 | This detector is the implementation of:
11 | - Grid R-CNN (https://arxiv.org/abs/1811.12030)
12 | - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
13 | """
14 |
15 | def __init__(self,
16 | backbone,
17 | rpn_head,
18 | roi_head,
19 | train_cfg,
20 | test_cfg,
21 | neck=None,
22 | pretrained=None,
23 | init_cfg=None):
24 | super(GridRCNN, self).__init__(
25 | backbone=backbone,
26 | neck=neck,
27 | rpn_head=rpn_head,
28 | roi_head=roi_head,
29 | train_cfg=train_cfg,
30 | test_cfg=test_cfg,
31 | pretrained=pretrained,
32 | init_cfg=init_cfg)
33 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/htc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .cascade_rcnn import CascadeRCNN
4 |
5 |
6 | @DETECTORS.register_module()
7 | class HybridTaskCascade(CascadeRCNN):
8 |     """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
9 |
10 | def __init__(self, **kwargs):
11 | super(HybridTaskCascade, self).__init__(**kwargs)
12 |
13 | @property
14 | def with_semantic(self):
15 | """bool: whether the detector has a semantic head"""
16 | return self.roi_head.with_semantic
17 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/mask2former.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .maskformer import MaskFormer
4 |
5 |
6 | @DETECTORS.register_module()
7 | class Mask2Former(MaskFormer):
8 | r"""Implementation of `Masked-attention Mask
9 | Transformer for Universal Image Segmentation
10 |     <https://arxiv.org/abs/2112.01527>`_."""
11 |
12 | def __init__(self,
13 | backbone,
14 | neck=None,
15 | panoptic_head=None,
16 | panoptic_fusion_head=None,
17 | train_cfg=None,
18 | test_cfg=None,
19 | init_cfg=None):
20 | super().__init__(
21 | backbone,
22 | neck=neck,
23 | panoptic_head=panoptic_head,
24 | panoptic_fusion_head=panoptic_fusion_head,
25 | train_cfg=train_cfg,
26 | test_cfg=test_cfg,
27 | init_cfg=init_cfg)
28 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/mask_rcnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .two_stage import TwoStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class MaskRCNN(TwoStageDetector):
8 |     """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | rpn_head,
13 | roi_head,
14 | train_cfg,
15 | test_cfg,
16 | neck=None,
17 | pretrained=None,
18 | init_cfg=None):
19 | super(MaskRCNN, self).__init__(
20 | backbone=backbone,
21 | neck=neck,
22 | rpn_head=rpn_head,
23 | roi_head=roi_head,
24 | train_cfg=train_cfg,
25 | test_cfg=test_cfg,
26 | pretrained=pretrained,
27 | init_cfg=init_cfg)
28 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/mask_scoring_rcnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .two_stage import TwoStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class MaskScoringRCNN(TwoStageDetector):
8 | """Mask Scoring RCNN.
9 |
10 | https://arxiv.org/abs/1903.00241
11 | """
12 |
13 | def __init__(self,
14 | backbone,
15 | rpn_head,
16 | roi_head,
17 | train_cfg,
18 | test_cfg,
19 | neck=None,
20 | pretrained=None,
21 | init_cfg=None):
22 | super(MaskScoringRCNN, self).__init__(
23 | backbone=backbone,
24 | neck=neck,
25 | rpn_head=rpn_head,
26 | roi_head=roi_head,
27 | train_cfg=train_cfg,
28 | test_cfg=test_cfg,
29 | pretrained=pretrained,
30 | init_cfg=init_cfg)
31 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/nasfcos.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class NASFCOS(SingleStageDetector):
8 | """NAS-FCOS: Fast Neural Architecture Search for Object Detection.
9 |
10 | https://arxiv.org/abs/1906.0442
11 | """
12 |
13 | def __init__(self,
14 | backbone,
15 | neck,
16 | bbox_head,
17 | train_cfg=None,
18 | test_cfg=None,
19 | pretrained=None,
20 | init_cfg=None):
21 | super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
22 | test_cfg, pretrained, init_cfg)
23 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/paa.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class PAA(SingleStageDetector):
8 |     """Implementation of `PAA <https://arxiv.org/abs/2007.08103>`_."""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/panoptic_fpn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
4 |
5 |
6 | @DETECTORS.register_module()
7 | class PanopticFPN(TwoStagePanopticSegmentor):
8 | r"""Implementation of `Panoptic feature pyramid
9 |     networks <https://arxiv.org/abs/1901.02446>`_"""
10 |
11 | def __init__(
12 | self,
13 | backbone,
14 | neck=None,
15 | rpn_head=None,
16 | roi_head=None,
17 | train_cfg=None,
18 | test_cfg=None,
19 | pretrained=None,
20 | init_cfg=None,
21 | # for panoptic segmentation
22 | semantic_head=None,
23 | panoptic_fusion_head=None):
24 | super(PanopticFPN, self).__init__(
25 | backbone=backbone,
26 | neck=neck,
27 | rpn_head=rpn_head,
28 | roi_head=roi_head,
29 | train_cfg=train_cfg,
30 | test_cfg=test_cfg,
31 | pretrained=pretrained,
32 | init_cfg=init_cfg,
33 | semantic_head=semantic_head,
34 | panoptic_fusion_head=panoptic_fusion_head)
35 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/point_rend.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .two_stage import TwoStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class PointRend(TwoStageDetector):
8 | """PointRend: Image Segmentation as Rendering
9 |
10 | This detector is the implementation of
11 |     `PointRend <https://arxiv.org/abs/1912.08193>`_.
12 |
13 | """
14 |
15 | def __init__(self,
16 | backbone,
17 | rpn_head,
18 | roi_head,
19 | train_cfg,
20 | test_cfg,
21 | neck=None,
22 | pretrained=None,
23 | init_cfg=None):
24 | super(PointRend, self).__init__(
25 | backbone=backbone,
26 | neck=neck,
27 | rpn_head=rpn_head,
28 | roi_head=roi_head,
29 | train_cfg=train_cfg,
30 | test_cfg=test_cfg,
31 | pretrained=pretrained,
32 | init_cfg=init_cfg)
33 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/queryinst.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .sparse_rcnn import SparseRCNN
4 |
5 |
6 | @DETECTORS.register_module()
7 | class QueryInst(SparseRCNN):
8 | r"""Implementation of
9 |     `Instances as Queries <https://arxiv.org/abs/2105.01928>`_"""
10 |
11 | def __init__(self,
12 | backbone,
13 | rpn_head,
14 | roi_head,
15 | train_cfg,
16 | test_cfg,
17 | neck=None,
18 | pretrained=None,
19 | init_cfg=None):
20 | super(QueryInst, self).__init__(
21 | backbone=backbone,
22 | neck=neck,
23 | rpn_head=rpn_head,
24 | roi_head=roi_head,
25 | train_cfg=train_cfg,
26 | test_cfg=test_cfg,
27 | pretrained=pretrained,
28 | init_cfg=init_cfg)
29 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/reppoints_detector.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class RepPointsDetector(SingleStageDetector):
8 | """RepPoints: Point Set Representation for Object Detection.
9 |
10 | This detector is the implementation of:
11 | - RepPoints detector (https://arxiv.org/pdf/1904.11490)
12 | """
13 |
14 | def __init__(self,
15 | backbone,
16 | neck,
17 | bbox_head,
18 | train_cfg=None,
19 | test_cfg=None,
20 | pretrained=None,
21 | init_cfg=None):
22 | super(RepPointsDetector,
23 | self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
24 | pretrained, init_cfg)
25 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/retinanet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class RetinaNet(SingleStageDetector):
8 |     """Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
9 |
10 | def __init__(self,
11 | backbone,
12 | neck,
13 | bbox_head,
14 | train_cfg=None,
15 | test_cfg=None,
16 | pretrained=None,
17 | init_cfg=None):
18 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained, init_cfg)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/scnet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .cascade_rcnn import CascadeRCNN
4 |
5 |
6 | @DETECTORS.register_module()
7 | class SCNet(CascadeRCNN):
8 |     """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
9 |
10 | def __init__(self, **kwargs):
11 | super(SCNet, self).__init__(**kwargs)
12 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/solo.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage_instance_seg import SingleStageInstanceSegmentor
4 |
5 |
6 | @DETECTORS.register_module()
7 | class SOLO(SingleStageInstanceSegmentor):
8 | """`SOLO: Segmenting Objects by Locations
9 |     <https://arxiv.org/abs/1912.04488>`_
10 |
11 | """
12 |
13 | def __init__(self,
14 | backbone,
15 | neck=None,
16 | bbox_head=None,
17 | mask_head=None,
18 | train_cfg=None,
19 | test_cfg=None,
20 | init_cfg=None,
21 | pretrained=None):
22 | super().__init__(
23 | backbone=backbone,
24 | neck=neck,
25 | bbox_head=bbox_head,
26 | mask_head=mask_head,
27 | train_cfg=train_cfg,
28 | test_cfg=test_cfg,
29 | init_cfg=init_cfg,
30 | pretrained=pretrained)
31 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/tood.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class TOOD(SingleStageDetector):
8 | r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
9 |     <https://arxiv.org/abs/2108.07755>`_."""
10 |
11 | def __init__(self,
12 | backbone,
13 | neck,
14 | bbox_head,
15 | train_cfg=None,
16 | test_cfg=None,
17 | pretrained=None,
18 | init_cfg=None):
19 | super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
20 | test_cfg, pretrained, init_cfg)
21 |
22 | def set_epoch(self, epoch):
23 | self.bbox_head.epoch = epoch
24 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/vfnet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class VFNet(SingleStageDetector):
8 | """Implementation of `VarifocalNet
9 |     (VFNet). <https://arxiv.org/abs/2008.13367>`_"""
10 |
11 | def __init__(self,
12 | backbone,
13 | neck,
14 | bbox_head,
15 | train_cfg=None,
16 | test_cfg=None,
17 | pretrained=None,
18 | init_cfg=None):
19 | super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
20 | test_cfg, pretrained, init_cfg)
21 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/detectors/yolof.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import DETECTORS
3 | from .single_stage import SingleStageDetector
4 |
5 |
6 | @DETECTORS.register_module()
7 | class YOLOF(SingleStageDetector):
8 | r"""Implementation of `You Only Look One-level Feature
9 |     <https://arxiv.org/abs/2103.09460>`_"""
10 |
11 | def __init__(self,
12 | backbone,
13 | neck,
14 | bbox_head,
15 | train_cfg=None,
16 | test_cfg=None,
17 | pretrained=None):
18 | super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
19 | test_cfg, pretrained)
20 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bfp import BFP
3 | from .channel_mapper import ChannelMapper
4 | from .ct_resnet_neck import CTResNetNeck
5 | from .dilated_encoder import DilatedEncoder
6 | from .dyhead import DyHead
7 | from .fpg import FPG
8 | from .fpn import FPN
9 | from .fpn_carafe import FPN_CARAFE
10 | from .hrfpn import HRFPN
11 | from .nas_fpn import NASFPN
12 | from .nasfcos_fpn import NASFCOS_FPN
13 | from .pafpn import PAFPN
14 | from .rfp import RFP
15 | from .ssd_neck import SSDNeck
16 | from .yolo_neck import YOLOV3Neck
17 | from .yolox_pafpn import YOLOXPAFPN
18 |
19 | __all__ = [
20 | 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
21 | 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
22 | 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead'
23 | ]
24 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dropblock import DropBlock
3 | from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
4 | from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
5 |
6 | __all__ = [
7 | 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder',
8 | 'MSDeformAttnPixelDecoder'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/bbox_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bbox_head import BBoxHead
3 | from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
4 | Shared4Conv1FCBBoxHead)
5 | from .dii_head import DIIHead
6 | from .double_bbox_head import DoubleConvFCBBoxHead
7 | from .sabl_head import SABLHead
8 | from .scnet_bbox_head import SCNetBBoxHead
9 |
10 | __all__ = [
11 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
12 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
13 | 'SCNetBBoxHead'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/double_roi_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from ..builder import HEADS
3 | from .standard_roi_head import StandardRoIHead
4 |
5 |
6 | @HEADS.register_module()
7 | class DoubleHeadRoIHead(StandardRoIHead):
8 | """RoI head for Double Head RCNN.
9 |
10 | https://arxiv.org/abs/1904.06493
11 | """
12 |
13 | def __init__(self, reg_roi_scale_factor, **kwargs):
14 | super(DoubleHeadRoIHead, self).__init__(**kwargs)
15 | self.reg_roi_scale_factor = reg_roi_scale_factor
16 |
17 | def _bbox_forward(self, x, rois):
18 | """Box head forward function used in both training and testing time."""
19 | bbox_cls_feats = self.bbox_roi_extractor(
20 | x[:self.bbox_roi_extractor.num_inputs], rois)
21 | bbox_reg_feats = self.bbox_roi_extractor(
22 | x[:self.bbox_roi_extractor.num_inputs],
23 | rois,
24 | roi_scale_factor=self.reg_roi_scale_factor)
25 | if self.with_shared_head:
26 | bbox_cls_feats = self.shared_head(bbox_cls_feats)
27 | bbox_reg_feats = self.shared_head(bbox_reg_feats)
28 | cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
29 |
30 | bbox_results = dict(
31 | cls_score=cls_score,
32 | bbox_pred=bbox_pred,
33 | bbox_feats=bbox_cls_feats)
34 | return bbox_results
35 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/mask_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coarse_mask_head import CoarseMaskHead
3 | from .dynamic_mask_head import DynamicMaskHead
4 | from .fcn_mask_head import FCNMaskHead
5 | from .feature_relay_head import FeatureRelayHead
6 | from .fused_semantic_head import FusedSemanticHead
7 | from .global_context_head import GlobalContextHead
8 | from .grid_head import GridHead
9 | from .htc_mask_head import HTCMaskHead
10 | from .mask_point_head import MaskPointHead
11 | from .maskiou_head import MaskIoUHead
12 | from .scnet_mask_head import SCNetMaskHead
13 | from .scnet_semantic_head import SCNetSemanticHead
14 |
15 | __all__ = [
16 | 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
17 | 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
18 | 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',
19 | 'DynamicMaskHead'
20 | ]
21 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.builder import HEADS
3 | from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
4 | from .fcn_mask_head import FCNMaskHead
5 |
6 |
7 | @HEADS.register_module()
8 | class SCNetMaskHead(FCNMaskHead):
9 |     """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
10 |
11 | Args:
12 | conv_to_res (bool, optional): if True, change the conv layers to
13 | ``SimplifiedBasicBlock``.
14 | """
15 |
16 | def __init__(self, conv_to_res=True, **kwargs):
17 | super(SCNetMaskHead, self).__init__(**kwargs)
18 | self.conv_to_res = conv_to_res
19 | if conv_to_res:
20 | assert self.conv_kernel_size == 3
21 | self.num_res_blocks = self.num_convs // 2
22 | self.convs = ResLayer(
23 | SimplifiedBasicBlock,
24 | self.in_channels,
25 | self.conv_out_channels,
26 | self.num_res_blocks,
27 | conv_cfg=self.conv_cfg,
28 | norm_cfg=self.norm_cfg)
29 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.builder import HEADS
3 | from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
4 | from .fused_semantic_head import FusedSemanticHead
5 |
6 |
7 | @HEADS.register_module()
8 | class SCNetSemanticHead(FusedSemanticHead):
9 |     """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
10 |
11 | Args:
12 | conv_to_res (bool, optional): if True, change the conv layers to
13 | ``SimplifiedBasicBlock``.
14 | """
15 |
16 | def __init__(self, conv_to_res=True, **kwargs):
17 | super(SCNetSemanticHead, self).__init__(**kwargs)
18 | self.conv_to_res = conv_to_res
19 | if self.conv_to_res:
20 | num_res_blocks = self.num_convs // 2
21 | self.convs = ResLayer(
22 | SimplifiedBasicBlock,
23 | self.in_channels,
24 | self.conv_out_channels,
25 | num_res_blocks,
26 | conv_cfg=self.conv_cfg,
27 | norm_cfg=self.norm_cfg)
28 | self.num_convs = num_res_blocks
29 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/roi_extractors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_roi_extractor import BaseRoIExtractor
3 | from .generic_roi_extractor import GenericRoIExtractor
4 | from .single_level_roi_extractor import SingleRoIExtractor
5 |
6 | __all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']
7 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/roi_heads/shared_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .res_layer import ResLayer
3 |
4 | __all__ = ['ResLayer']
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/seg_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403
3 | from .panoptic_fusion_heads import * # noqa: F401,F403
4 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_panoptic_fusion_head import \
3 | BasePanopticFusionHead # noqa: F401,F403
4 | from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
5 | from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403
6 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/models/utils/make_divisible.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
3 | """Make divisible function.
4 |
5 |     This function rounds the channel number to the nearest value divisible by
6 |     the divisor. It is taken from the original TensorFlow repo and ensures
7 |     that all layers have a channel number divisible by the divisor. It can
8 | be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa
9 |
10 | Args:
11 | value (int): The original channel number.
12 | divisor (int): The divisor to fully divide the channel number.
13 | min_value (int): The minimum value of the output channel.
14 |             Default: None, which means the minimum value equals the divisor.
15 | min_ratio (float): The minimum ratio of the rounded channel number to
16 | the original channel number. Default: 0.9.
17 |
18 | Returns:
19 | int: The modified output channel number.
20 | """
21 |
22 | if min_value is None:
23 | min_value = divisor
24 | new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
25 | # Make sure that round down does not go down by more than (1-min_ratio).
26 | if new_value < min_ratio * value:
27 | new_value += divisor
28 | return new_value
29 |
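A few worked values for the function above (a small sketch, assuming the module is importable as laid out in this repo):

    from mmdet.models.utils.make_divisible import make_divisible

    assert make_divisible(32, 8) == 32   # already a multiple of the divisor
    assert make_divisible(37, 8) == 40   # rounds up to the nearest multiple
    assert make_divisible(35, 8) == 32   # rounds down, still >= 0.9 * 35
    assert make_divisible(8, 6) == 12    # 6 < 0.9 * 8, so one divisor is added back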
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .collect_env import collect_env
3 | from .logger import get_caller_name, get_root_logger, log_img_scale
4 | from .misc import find_latest_checkpoint, update_data_root
5 | from .setup_env import setup_multi_processes
6 |
7 | __all__ = [
8 | 'get_root_logger', 'collect_env', 'find_latest_checkpoint',
9 | 'update_data_root', 'setup_multi_processes', 'get_caller_name',
10 | 'log_img_scale'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import collect_env as collect_base_env
3 | from mmcv.utils import get_git_hash
4 |
5 | import mmdet
6 |
7 |
8 | def collect_env():
9 | """Collect the information of the running environments."""
10 | env_info = collect_base_env()
11 | env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
12 | return env_info
13 |
14 |
15 | if __name__ == '__main__':
16 | for name, val in collect_env().items():
17 | print(f'{name}: {val}')
18 |
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/utils/util_random.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | """Helpers for random number generators."""
3 | import numpy as np
4 |
5 |
6 | def ensure_rng(rng=None):
7 | """Coerces input into a random number generator.
8 |
9 | If the input is None, then a global random state is returned.
10 |
11 | If the input is a numeric value, then that is used as a seed to construct a
12 | random state. Otherwise the input is returned as-is.
13 |
14 | Adapted from [1]_.
15 |
16 | Args:
17 | rng (int | numpy.random.RandomState | None):
18 | if None, then defaults to the global rng. Otherwise this can be an
19 | integer or a RandomState class
20 | Returns:
21 | (numpy.random.RandomState) : rng -
22 | a numpy random number generator
23 |
24 | References:
25 | .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
26 | """
27 |
28 | if rng is None:
29 | rng = np.random.mtrand._rand
30 | elif isinstance(rng, int):
31 | rng = np.random.RandomState(rng)
32 | else:
33 | rng = rng
34 | return rng
35 |
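Typical calls, mirroring the three branches above (a small usage sketch, not part of the file):

    import numpy as np
    from mmdet.utils.util_random import ensure_rng

    rng_global = ensure_rng(None)                        # module-level global RandomState
    rng_seeded = ensure_rng(0)                           # fresh RandomState seeded with 0
    rng_passed = ensure_rng(np.random.RandomState(42))   # returned unchanged
    assert rng_seeded.rand() == np.random.RandomState(0).rand()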
--------------------------------------------------------------------------------
/mmdet-v2.23/mmdet/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | __version__ = '2.23.0'
4 | short_version = __version__
5 |
6 |
7 | def parse_version_info(version_str):
8 | version_info = []
9 | for x in version_str.split('.'):
10 | if x.isdigit():
11 | version_info.append(int(x))
12 | elif x.find('rc') != -1:
13 | patch_version = x.split('rc')
14 | version_info.append(int(patch_version[0]))
15 | version_info.append(f'rc{patch_version[1]}')
16 | return tuple(version_info)
17 |
18 |
19 | version_info = parse_version_info(__version__)
20 |
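For example, applying the parser above (the second string is only an illustrative pre-release version, not a real release of this fork):

    from mmdet.version import parse_version_info

    assert parse_version_info('2.23.0') == (2, 23, 0)
    assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')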
--------------------------------------------------------------------------------
/mmdet-v2.23/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --xdoctest --xdoctest-style=auto
3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs
4 |
5 | filterwarnings= default
6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning
7 | ignore:.*Define the __nice__ method for.*:Warning
8 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/build.txt
2 | -r requirements/optional.txt
3 | -r requirements/runtime.txt
4 | -r requirements/tests.txt
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/albu.txt:
--------------------------------------------------------------------------------
1 | albumentations>=0.3.2 --no-binary qudida,albumentations
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/build.txt:
--------------------------------------------------------------------------------
1 | # These must be installed before building mmdetection
2 | cython
3 | numpy
4 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
3 | recommonmark
4 | sphinx==4.0.2
5 | sphinx-copybutton
6 | sphinx_markdown_tables
7 | sphinx_rtd_theme==0.5.2
8 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcv-full>=1.3.17
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/optional.txt:
--------------------------------------------------------------------------------
1 | cityscapesscripts
2 | imagecorruptions
3 | scipy
4 | sklearn
5 | timm
6 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
1 | mmcv
2 | torch
3 | torchvision
4 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/runtime.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | numpy
3 | pycocotools
4 | six
5 | terminaltables
6 |
--------------------------------------------------------------------------------
/mmdet-v2.23/requirements/tests.txt:
--------------------------------------------------------------------------------
1 | asynctest
2 | codecov
3 | flake8
4 | interrogate
5 | isort==4.3.21
6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future.
7 | kwarray
8 | -e git+https://github.com/open-mmlab/mmtracking#egg=mmtrack
9 | onnx==1.7.0
10 | onnxruntime>=1.8.0
11 | pytest
12 | ubelt
13 | xdoctest>=0.10.0
14 | yapf
15 |
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/coco_test_12510.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/coco_test_12510.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/corruptions_sev_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/corruptions_sev_3.png
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/data_pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/data_pipeline.png
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/loss_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/loss_curve.png
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/mmdet-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/mmdet-logo.png
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/qq_group_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/qq_group_qrcode.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/resources/zhihu_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/resources/zhihu_qrcode.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | line_length = 79
3 | multi_line_output = 0
4 | extra_standard_library = setuptools
5 | known_first_party = mmdet
6 | known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml
7 | no_lines_before = STDLIB,LOCALFOLDER
8 | default_section = THIRDPARTY
9 |
10 | [yapf]
11 | BASED_ON_STYLE = pep8
12 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
13 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
14 |
15 | [codespell]
16 | skip = *.ipynb
17 | quiet-level = 3
18 | ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood
19 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2007/Annotations/000001.xml:
--------------------------------------------------------------------------------
1 | <annotation>
2 | <folder>VOC2007</folder>
3 | <filename>000001.jpg</filename>
4 | <source>
5 | <database>The VOC2007 Database</database>
6 | <annotation>PASCAL VOC2007</annotation>
7 | <image>flickr</image>
8 | <flickrid>341012865</flickrid>
9 | </source>
10 | <owner>
11 | <flickrid>Fried Camels</flickrid>
12 | <name>Jinky the Fruit Bat</name>
13 | </owner>
14 | <size>
15 | <width>353</width>
16 | <height>500</height>
17 | <depth>3</depth>
18 | </size>
19 | <segmented>0</segmented>
20 | <object>
32 | <object>
44 | </annotation>
45 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2007/ImageSets/Main/test.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2007/JPEGImages/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/data/VOCdevkit/VOC2007/JPEGImages/000001.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2012/Annotations/000001.xml:
--------------------------------------------------------------------------------
1 | <annotation>
2 | <folder>VOC2007</folder>
3 | <filename>000002.jpg</filename>
4 | <source>
5 | <database>The VOC2007 Database</database>
6 | <annotation>PASCAL VOC2007</annotation>
7 | <image>flickr</image>
8 | <flickrid>329145082</flickrid>
9 | </source>
10 | <owner>
11 | <flickrid>hiromori2</flickrid>
12 | <name>Hiroyuki Mori</name>
13 | </owner>
14 | <size>
15 | <width>335</width>
16 | <height>500</height>
17 | <depth>3</depth>
18 | </size>
19 | <segmented>0</segmented>
20 | <object>
32 | </annotation>
33 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2012/ImageSets/Main/test.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/VOCdevkit/VOC2012/JPEGImages/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/data/VOCdevkit/VOC2012/JPEGImages/000001.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/data/color.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/custom_dataset/images/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/data/custom_dataset/images/000001.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/custom_dataset/images/000001.xml:
--------------------------------------------------------------------------------
1 | <annotation>
2 | <folder>VOC2007</folder>
3 | <filename>000001.jpg</filename>
4 | <source>
5 | <database>The VOC2007 Database</database>
6 | <annotation>PASCAL VOC2007</annotation>
7 | <image>flickr</image>
8 | <flickrid>341012865</flickrid>
9 | </source>
10 | <owner>
11 | <flickrid>Fried Camels</flickrid>
12 | <name>Jinky the Fruit Bat</name>
13 | </owner>
14 | <size>
15 | <width>353</width>
16 | <height>500</height>
17 | <depth>3</depth>
18 | </size>
19 | <segmented>0</segmented>
20 | <object>
32 | <object>
44 | </annotation>
45 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/custom_dataset/test.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/custom_dataset/trainval.txt:
--------------------------------------------------------------------------------
1 | 000001
2 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/data/gray.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/data/gray.jpg
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_data/test_datasets/test_xml_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 |
4 | from mmdet.datasets import DATASETS
5 |
6 |
7 | def test_xml_dataset():
8 | dataconfig = {
9 | 'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
10 | 'img_prefix': 'data/VOCdevkit/VOC2007/',
11 | 'pipeline': [{
12 | 'type': 'LoadImageFromFile'
13 | }]
14 | }
15 | XMLDataset = DATASETS.get('XMLDataset')
16 |
17 | class XMLDatasetSubClass(XMLDataset):
18 | CLASSES = None
19 |
20 | # get_ann_info and _filter_imgs of XMLDataset rely on self.CLASSES,
21 | # so building a subclass whose CLASSES is None should raise an AssertionError
22 | with pytest.raises(AssertionError):
23 | XMLDatasetSubClass(**dataconfig)
24 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_data/test_pipelines/test_formatting.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os.path as osp
3 |
4 | from mmcv.utils import build_from_cfg
5 |
6 | from mmdet.datasets.builder import PIPELINES
7 |
8 |
9 | def test_default_format_bundle():
10 | results = dict(
11 | img_prefix=osp.join(osp.dirname(__file__), '../../data'),
12 | img_info=dict(filename='color.jpg'))
13 | load = dict(type='LoadImageFromFile')
14 | load = build_from_cfg(load, PIPELINES)
15 | bundle = dict(type='DefaultFormatBundle')
16 | bundle = build_from_cfg(bundle, PIPELINES)
17 | results = load(results)
18 | assert 'pad_shape' not in results
19 | assert 'scale_factor' not in results
20 | assert 'img_norm_cfg' not in results
21 | results = bundle(results)
22 | assert 'pad_shape' in results
23 | assert 'scale_factor' in results
24 | assert 'img_norm_cfg' in results
25 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_data/test_pipelines/test_transform/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .utils import check_result_same, construct_toy_data, create_random_bboxes
3 |
4 | __all__ = ['create_random_bboxes', 'construct_toy_data', 'check_result_same']
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_backbones/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .utils import check_norm_state, is_block, is_norm
3 |
4 | __all__ = ['is_block', 'is_norm', 'check_norm_state']
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_backbones/test_efficientnet.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 |
4 | from mmdet.models.backbones import EfficientNet
5 |
6 |
7 | def test_efficientnet_backbone():
8 | """Test EfficientNet backbone."""
9 | with pytest.raises(AssertionError):
10 | # EfficientNet arch should be a key in EfficientNet.arch_settings
11 | EfficientNet(arch='c3')
12 |
13 | model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
14 | model.train()
15 |
16 | imgs = torch.randn(2, 3, 32, 32)
17 | feat = model(imgs)
18 | assert len(feat) == 7
19 | assert feat[0].shape == torch.Size([2, 32, 16, 16])
20 | assert feat[1].shape == torch.Size([2, 16, 16, 16])
21 | assert feat[2].shape == torch.Size([2, 24, 8, 8])
22 | assert feat[3].shape == torch.Size([2, 40, 4, 4])
23 | assert feat[4].shape == torch.Size([2, 112, 2, 2])
24 | assert feat[5].shape == torch.Size([2, 320, 1, 1])
25 | assert feat[6].shape == torch.Size([2, 1280, 1, 1])
26 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_backbones/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from torch.nn.modules import GroupNorm
3 | from torch.nn.modules.batchnorm import _BatchNorm
4 |
5 | from mmdet.models.backbones.res2net import Bottle2neck
6 | from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
7 | from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
8 | from mmdet.models.utils import SimplifiedBasicBlock
9 |
10 |
11 | def is_block(modules):
12 | """Check if is ResNet building block."""
13 | if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
14 | SimplifiedBasicBlock)):
15 | return True
16 | return False
17 |
18 |
19 | def is_norm(modules):
20 | """Check if is one of the norms."""
21 | if isinstance(modules, (GroupNorm, _BatchNorm)):
22 | return True
23 | return False
24 |
25 |
26 | def check_norm_state(modules, train_state):
27 | """Check if norm layer is in correct train state."""
28 | for mod in modules:
29 | if isinstance(mod, _BatchNorm):
30 | if mod.training != train_state:
31 | return False
32 | return True
33 |
--------------------------------------------------------------------------------
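A hypothetical usage of the helpers above with a tiny torch module (is_norm, is_block and check_norm_state are the functions defined in this file):

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
model.eval()
assert not is_block(model[0])                         # a plain Conv2d is not a ResNet block
assert is_norm(model[1])                              # BatchNorm2d counts as a norm layer
assert check_norm_state(model.modules(), train_state=False)  # eval() put BN in eval mode
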
/mmdet-v2.23/tests/test_models/test_roi_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .utils import _dummy_bbox_sampling
3 |
4 | __all__ = ['_dummy_bbox_sampling']
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_roi_heads/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmdet.core import build_assigner, build_sampler
5 |
6 |
7 | def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
8 | """Create sample results that can be passed to BBoxHead.get_targets."""
9 | num_imgs = 1
10 | feat = torch.rand(1, 1, 3, 3)
11 | assign_config = dict(
12 | type='MaxIoUAssigner',
13 | pos_iou_thr=0.5,
14 | neg_iou_thr=0.5,
15 | min_pos_iou=0.5,
16 | ignore_iof_thr=-1)
17 | sampler_config = dict(
18 | type='RandomSampler',
19 | num=512,
20 | pos_fraction=0.25,
21 | neg_pos_ub=-1,
22 | add_gt_as_proposals=True)
23 | bbox_assigner = build_assigner(assign_config)
24 | bbox_sampler = build_sampler(sampler_config)
25 | gt_bboxes_ignore = [None for _ in range(num_imgs)]
26 | sampling_results = []
27 | for i in range(num_imgs):
28 | assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
29 | gt_bboxes_ignore[i], gt_labels[i])
30 | sampling_result = bbox_sampler.sample(
31 | assign_result,
32 | proposal_list[i],
33 | gt_bboxes[i],
34 | gt_labels[i],
35 | feats=feat)
36 | sampling_results.append(sampling_result)
37 |
38 | return sampling_results
39 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_utils/test_conv_upsample.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmdet.models.utils import ConvUpsample
6 |
7 |
8 | @pytest.mark.parametrize('num_layers', [0, 1, 2])
9 | def test_conv_upsample(num_layers):
10 | num_upsample = num_layers if num_layers > 0 else 0
11 | num_layers = num_layers if num_layers > 0 else 1
12 | layer = ConvUpsample(
13 | 10,
14 | 5,
15 | num_layers=num_layers,
16 | num_upsample=num_upsample,
17 | conv_cfg=None,
18 | norm_cfg=None)
19 |
20 | size = 5
21 | x = torch.randn((1, 10, size, size))
22 | size = size * pow(2, num_upsample)
23 | x = layer(x)
24 | assert x.shape[-2:] == (size, size)
25 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_models/test_utils/test_model_misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import numpy as np
3 | import torch
4 | from torch.autograd import gradcheck
5 |
6 | from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean
7 |
8 |
9 | def test_interpolate_as():
10 | source = torch.rand((1, 5, 4, 4))
11 | target = torch.rand((1, 1, 16, 16))
12 |
13 | # Test 4D source and target
14 | result = interpolate_as(source, target)
15 | assert result.shape == torch.Size((1, 5, 16, 16))
16 |
17 | # Test 3D target
18 | result = interpolate_as(source, target.squeeze(0))
19 | assert result.shape == torch.Size((1, 5, 16, 16))
20 |
21 | # Test 3D source
22 | result = interpolate_as(source.squeeze(0), target)
23 | assert result.shape == torch.Size((5, 16, 16))
24 |
25 | # Test type(target) == np.ndarray
26 | target = np.random.rand(16, 16)
27 | result = interpolate_as(source.squeeze(0), target)
28 | assert result.shape == torch.Size((5, 16, 16))
29 |
30 |
31 | def test_sigmoid_geometric_mean():
32 | x = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
33 | y = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
34 | inputs = (x, y)
35 | test = gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4)
36 | assert test
37 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .utils import ort_validate
3 |
4 | __all__ = ['ort_validate']
5 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/data/fsaf_head_get_bboxes.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/test_onnx/data/fsaf_head_get_bboxes.pkl
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/data/retina_head_get_bboxes.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/test_onnx/data/retina_head_get_bboxes.pkl
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/data/ssd_head_get_bboxes.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/test_onnx/data/ssd_head_get_bboxes.pkl
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/data/yolov3_head_get_bboxes.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/test_onnx/data/yolov3_head_get_bboxes.pkl
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_onnx/data/yolov3_neck.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmdet-v2.23/tests/test_onnx/data/yolov3_neck.pkl
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_utils/test_logger.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 |
4 | from mmdet.utils import get_caller_name, log_img_scale
5 |
6 |
7 | def callee_func():
8 | caller_name = get_caller_name()
9 | return caller_name
10 |
11 |
12 | class CallerClassForTest:
13 |
14 | def __init__(self):
15 | self.caller_name = callee_func()
16 |
17 |
18 | def test_get_caller_name():
19 | # test the case that caller is a function
20 | caller_name = callee_func()
21 | assert caller_name == 'test_get_caller_name'
22 |
23 | # test the case that caller is a method in a class
24 | caller_class = CallerClassForTest()
25 | assert caller_class.caller_name == 'CallerClassForTest.__init__'
26 |
27 |
28 | def test_log_img_scale():
29 | img_scale = (800, 1333)
30 | done_logging = log_img_scale(img_scale)
31 | assert done_logging
32 |
33 | img_scale = (1333, 800)
34 | done_logging = log_img_scale(img_scale, shape_order='wh')
35 | assert done_logging
36 |
37 | with pytest.raises(ValueError):
38 | img_scale = (1333, 800)
39 | done_logging = log_img_scale(img_scale, shape_order='xywh')
40 |
41 | img_scale = (640, 640)
42 | done_logging = log_img_scale(img_scale, skip_square=False)
43 | assert done_logging
44 |
45 | img_scale = (640, 640)
46 | done_logging = log_img_scale(img_scale, skip_square=True)
47 | assert not done_logging
48 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tests/test_utils/test_version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet import digit_version
3 |
4 |
5 | def test_version_check():
6 | assert digit_version('1.0.5') > digit_version('1.0.5rc0')
7 | assert digit_version('1.0.5') > digit_version('1.0.4rc0')
8 | assert digit_version('1.0.5') > digit_version('1.0rc0')
9 | assert digit_version('1.0.0') > digit_version('0.6.2')
10 | assert digit_version('1.0.0') > digit_version('0.2.16')
11 | assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0')
12 | assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0')
13 | assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0')
14 | assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1')
15 | assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1')
16 | assert digit_version('1.0.0') > digit_version('1.0.0rc1')
17 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tools/dist_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | CONFIG=$1
4 | CHECKPOINT=$2
5 | GPUS=$3
6 | NNODES=${NNODES:-1}
7 | NODE_RANK=${NODE_RANK:-0}
8 | PORT=${PORT:-29500}
9 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
10 |
11 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
12 | python -m torch.distributed.launch \
13 | --nnodes=$NNODES \
14 | --node_rank=$NODE_RANK \
15 | --master_addr=$MASTER_ADDR \
16 | --nproc_per_node=$GPUS \
17 | --master_port=$PORT \
18 | $(dirname "$0")/test.py \
19 | $CONFIG \
20 | $CHECKPOINT \
21 | --launcher pytorch \
22 | ${@:4}
23 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tools/dist_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | CONFIG=$1
4 | GPUS=$2
5 | NNODES=${NNODES:-1}
6 | NODE_RANK=${NODE_RANK:-0}
7 | PORT=${PORT:-29500}
8 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
9 |
10 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
11 | python -m torch.distributed.launch \
12 | --nnodes=$NNODES \
13 | --node_rank=$NODE_RANK \
14 | --master_addr=$MASTER_ADDR \
15 | --nproc_per_node=$GPUS \
16 | --master_port=$PORT \
17 | $(dirname "$0")/train.py \
18 | $CONFIG \
19 | --seed 0 \
20 | --launcher pytorch ${@:3}
21 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tools/misc/gen_coco_panoptic_test_info.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path as osp
3 |
4 | import mmcv
5 |
6 |
7 | def parse_args():
8 | parser = argparse.ArgumentParser(
9 | description='Generate COCO test image information '
10 | 'for COCO panoptic segmentation.')
11 | parser.add_argument('data_root', help='Path to COCO annotation directory.')
12 | args = parser.parse_args()
13 |
14 | return args
15 |
16 |
17 | def main():
18 | args = parse_args()
19 | data_root = args.data_root
20 | val_info = mmcv.load(osp.join(data_root, 'panoptic_val2017.json'))
21 | test_old_info = mmcv.load(
22 | osp.join(data_root, 'image_info_test-dev2017.json'))
23 |
24 | # replace categories from image_info_test-dev2017.json
25 | # with categories from panoptic_val2017.json which
26 | # has attribute `isthing`.
27 | test_info = test_old_info
28 | test_info.update({'categories': val_info['categories']})
29 | mmcv.dump(test_info,
30 | osp.join(data_root, 'panoptic_image_info_test-dev2017.json'))
31 |
32 |
33 | if __name__ == '__main__':
34 | main()
35 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tools/model_converters/selfsup2mmdet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import argparse
3 | from collections import OrderedDict
4 |
5 | import torch
6 |
7 |
8 | def moco_convert(src, dst):
9 | """Convert keys in pycls pretrained moco models to mmdet style."""
10 | # load caffe model
11 | moco_model = torch.load(src)
12 | blobs = moco_model['state_dict']
13 | # convert to pytorch style
14 | state_dict = OrderedDict()
15 | for k, v in blobs.items():
16 | if not k.startswith('module.encoder_q.'):
17 | continue
18 | old_k = k
19 | k = k.replace('module.encoder_q.', '')
20 | state_dict[k] = v
21 | print(old_k, '->', k)
22 | # save checkpoint
23 | checkpoint = dict()
24 | checkpoint['state_dict'] = state_dict
25 | torch.save(checkpoint, dst)
26 |
27 |
28 | def main():
29 | parser = argparse.ArgumentParser(description='Convert model keys')
30 | parser.add_argument('src', help='src self-supervised model path')
31 | parser.add_argument('dst', help='save path')
32 | parser.add_argument(
33 | '--selfsup', type=str, choices=['moco', 'swav'], help='self-supervised method of the source checkpoint')
34 | args = parser.parse_args()
35 | if args.selfsup == 'moco':
36 | moco_convert(args.src, args.dst)
37 | elif args.selfsup == 'swav':
38 | print('SWAV does not need to convert the keys')
39 |
40 |
41 | if __name__ == '__main__':
42 | main()
43 |
--------------------------------------------------------------------------------
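To make the key handling above concrete, a small sketch with hypothetical key names: only weights under module.encoder_q. are kept, the prefix is stripped, and everything else (e.g. the momentum encoder) is dropped.

from collections import OrderedDict

import torch

blobs = OrderedDict([
    ('module.encoder_q.conv1.weight', torch.zeros(1)),
    ('module.encoder_k.conv1.weight', torch.zeros(1)),  # momentum encoder, discarded
])
state_dict = OrderedDict(
    (k.replace('module.encoder_q.', ''), v)
    for k, v in blobs.items() if k.startswith('module.encoder_q.'))
assert list(state_dict) == ['conv1.weight']
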
/mmdet-v2.23/tools/slurm_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | CHECKPOINT=$4
9 | GPUS=${GPUS:-8}
10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12 | PY_ARGS=${@:5}
13 | SRUN_ARGS=${SRUN_ARGS:-""}
14 |
15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16 | srun -p ${PARTITION} \
17 | --job-name=${JOB_NAME} \
18 | --gres=gpu:${GPUS_PER_NODE} \
19 | --ntasks=${GPUS} \
20 | --ntasks-per-node=${GPUS_PER_NODE} \
21 | --cpus-per-task=${CPUS_PER_TASK} \
22 | --kill-on-bad-exit=1 \
23 | ${SRUN_ARGS} \
24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
25 |
--------------------------------------------------------------------------------
/mmdet-v2.23/tools/slurm_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | WORK_DIR=$4
9 | GPUS=${GPUS:-8}
10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12 | SRUN_ARGS=${SRUN_ARGS:-""}
13 | PY_ARGS=${@:5}
14 |
15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16 | srun -p ${PARTITION} \
17 | --job-name=${JOB_NAME} \
18 | --gres=gpu:${GPUS_PER_NODE} \
19 | --ntasks=${GPUS} \
20 | --ntasks-per-node=${GPUS_PER_NODE} \
21 | --cpus-per-task=${CPUS_PER_TASK} \
22 | --kill-on-bad-exit=1 \
23 | ${SRUN_ARGS} \
24 | python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
25 |
--------------------------------------------------------------------------------
/mmseg-v0.28/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - name: "MMSegmentation Contributors"
5 | title: "OpenMMLab Semantic Segmentation Toolbox and Benchmark"
6 | date-released: 2020-07-10
7 | url: "https://github.com/open-mmlab/mmsegmentation"
8 | license: Apache-2.0
9 |
--------------------------------------------------------------------------------
/mmseg-v0.28/LICENSES.md:
--------------------------------------------------------------------------------
1 | # Licenses for special features
2 |
3 | In this file, we list the features that are covered by licenses other than Apache 2.0. Users should be careful when adopting these features for any commercial use.
4 |
5 | | Feature | Files | License |
6 | | :-------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------: |
7 | | SegFormer | [mmseg/models/decode_heads/segformer_head.py](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/segformer_head.py) | [NVIDIA License](https://github.com/NVlabs/SegFormer#license) |
8 |
--------------------------------------------------------------------------------
/mmseg-v0.28/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements/*.txt
2 | include mmseg/.mim/model-index.yml
3 | recursive-include mmseg/.mim/configs *.py *.yml
4 | recursive-include mmseg/.mim/tools *.py *.sh
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/datasets/cityscapes_1024x1024.py:
--------------------------------------------------------------------------------
1 | _base_ = './cityscapes.py'
2 | img_norm_cfg = dict(
3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4 | crop_size = (1024, 1024)
5 | train_pipeline = [
6 | dict(type='LoadImageFromFile'),
7 | dict(type='LoadAnnotations'),
8 | dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10 | dict(type='RandomFlip', prob=0.5),
11 | dict(type='PhotoMetricDistortion'),
12 | dict(type='Normalize', **img_norm_cfg),
13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
14 | dict(type='DefaultFormatBundle'),
15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']),
16 | ]
17 | test_pipeline = [
18 | dict(type='LoadImageFromFile'),
19 | dict(
20 | type='MultiScaleFlipAug',
21 | img_scale=(2048, 1024),
22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
23 | flip=False,
24 | transforms=[
25 | dict(type='Resize', keep_ratio=True),
26 | dict(type='RandomFlip'),
27 | dict(type='Normalize', **img_norm_cfg),
28 | dict(type='ImageToTensor', keys=['img']),
29 | dict(type='Collect', keys=['img']),
30 | ])
31 | ]
32 | data = dict(
33 | train=dict(pipeline=train_pipeline),
34 | val=dict(pipeline=test_pipeline),
35 | test=dict(pipeline=test_pipeline))
36 |
--------------------------------------------------------------------------------
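A sketch of how a dataset config like the one above is typically consumed (run from the mmseg-v0.28 root with mmcv installed; the attribute names follow the file above):

from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/cityscapes_1024x1024.py')
assert cfg.crop_size == (1024, 1024)
assert cfg.data.train.pipeline[3].type == 'RandomCrop'   # the RandomCrop step reuses crop_size
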
/mmseg-v0.28/configs/_base_/datasets/cityscapes_768x768.py:
--------------------------------------------------------------------------------
1 | _base_ = './cityscapes.py'
2 | img_norm_cfg = dict(
3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4 | crop_size = (768, 768)
5 | train_pipeline = [
6 | dict(type='LoadImageFromFile'),
7 | dict(type='LoadAnnotations'),
8 | dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10 | dict(type='RandomFlip', prob=0.5),
11 | dict(type='PhotoMetricDistortion'),
12 | dict(type='Normalize', **img_norm_cfg),
13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
14 | dict(type='DefaultFormatBundle'),
15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']),
16 | ]
17 | test_pipeline = [
18 | dict(type='LoadImageFromFile'),
19 | dict(
20 | type='MultiScaleFlipAug',
21 | img_scale=(2049, 1025),
22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
23 | flip=False,
24 | transforms=[
25 | dict(type='Resize', keep_ratio=True),
26 | dict(type='RandomFlip'),
27 | dict(type='Normalize', **img_norm_cfg),
28 | dict(type='ImageToTensor', keys=['img']),
29 | dict(type='Collect', keys=['img']),
30 | ])
31 | ]
32 | data = dict(
33 | train=dict(pipeline=train_pipeline),
34 | val=dict(pipeline=test_pipeline),
35 | test=dict(pipeline=test_pipeline))
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/datasets/cityscapes_769x769.py:
--------------------------------------------------------------------------------
1 | _base_ = './cityscapes.py'
2 | img_norm_cfg = dict(
3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4 | crop_size = (769, 769)
5 | train_pipeline = [
6 | dict(type='LoadImageFromFile'),
7 | dict(type='LoadAnnotations'),
8 | dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10 | dict(type='RandomFlip', prob=0.5),
11 | dict(type='PhotoMetricDistortion'),
12 | dict(type='Normalize', **img_norm_cfg),
13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
14 | dict(type='DefaultFormatBundle'),
15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']),
16 | ]
17 | test_pipeline = [
18 | dict(type='LoadImageFromFile'),
19 | dict(
20 | type='MultiScaleFlipAug',
21 | img_scale=(2049, 1025),
22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
23 | flip=False,
24 | transforms=[
25 | dict(type='Resize', keep_ratio=True),
26 | dict(type='RandomFlip'),
27 | dict(type='Normalize', **img_norm_cfg),
28 | dict(type='ImageToTensor', keys=['img']),
29 | dict(type='Collect', keys=['img']),
30 | ])
31 | ]
32 | data = dict(
33 | train=dict(pipeline=train_pipeline),
34 | val=dict(pipeline=test_pipeline),
35 | test=dict(pipeline=test_pipeline))
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/datasets/cityscapes_832x832.py:
--------------------------------------------------------------------------------
1 | _base_ = './cityscapes.py'
2 | img_norm_cfg = dict(
3 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
4 | crop_size = (832, 832)
5 | train_pipeline = [
6 | dict(type='LoadImageFromFile'),
7 | dict(type='LoadAnnotations'),
8 | dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
9 | dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10 | dict(type='RandomFlip', prob=0.5),
11 | dict(type='PhotoMetricDistortion'),
12 | dict(type='Normalize', **img_norm_cfg),
13 | dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
14 | dict(type='DefaultFormatBundle'),
15 | dict(type='Collect', keys=['img', 'gt_semantic_seg']),
16 | ]
17 | test_pipeline = [
18 | dict(type='LoadImageFromFile'),
19 | dict(
20 | type='MultiScaleFlipAug',
21 | img_scale=(2048, 1024),
22 | # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
23 | flip=False,
24 | transforms=[
25 | dict(type='Resize', keep_ratio=True),
26 | dict(type='RandomFlip'),
27 | dict(type='Normalize', **img_norm_cfg),
28 | dict(type='ImageToTensor', keys=['img']),
29 | dict(type='Collect', keys=['img']),
30 | ])
31 | ]
32 | data = dict(
33 | train=dict(pipeline=train_pipeline),
34 | val=dict(pipeline=test_pipeline),
35 | test=dict(pipeline=test_pipeline))
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/datasets/pascal_voc12_aug.py:
--------------------------------------------------------------------------------
1 | _base_ = './pascal_voc12.py'
2 | # dataset settings
3 | data = dict(
4 | train=dict(
5 | ann_dir=['SegmentationClass', 'SegmentationClassAug'],
6 | split=[
7 | 'ImageSets/Segmentation/train.txt',
8 | 'ImageSets/Segmentation/aug.txt'
9 | ]))
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | # yapf:disable
2 | log_config = dict(
3 | interval=50,
4 | hooks=[
5 | dict(type='TextLoggerHook', by_epoch=False),
6 | # dict(type='TensorboardLoggerHook')
7 | # dict(type='PaviLoggerHook') # for internal services
8 | ])
9 | # yapf:enable
10 | dist_params = dict(backend='nccl')
11 | log_level = 'INFO'
12 | load_from = None
13 | resume_from = None
14 | workflow = [('train', 1)]
15 | cudnn_benchmark = True
16 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/cgnet.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
3 | model = dict(
4 | type='EncoderDecoder',
5 | backbone=dict(
6 | type='CGNet',
7 | norm_cfg=norm_cfg,
8 | in_channels=3,
9 | num_channels=(32, 64, 128),
10 | num_blocks=(3, 21),
11 | dilations=(2, 4),
12 | reductions=(8, 16)),
13 | decode_head=dict(
14 | type='FCNHead',
15 | in_channels=256,
16 | in_index=2,
17 | channels=256,
18 | num_convs=0,
19 | concat_input=False,
20 | dropout_ratio=0,
21 | num_classes=19,
22 | norm_cfg=norm_cfg,
23 | loss_decode=dict(
24 | type='CrossEntropyLoss',
25 | use_sigmoid=False,
26 | loss_weight=1.0,
27 | class_weight=[
28 | 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
29 | 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
30 | 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
31 | 10.396974, 10.055647
32 | ])),
33 | # model training and testing settings
34 | train_cfg=dict(sampler=None),
35 | test_cfg=dict(mode='whole'))
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/dpt_vit-b16.py:
--------------------------------------------------------------------------------
1 | norm_cfg = dict(type='SyncBN', requires_grad=True)
2 | model = dict(
3 | type='EncoderDecoder',
4 | pretrained='pretrain/vit-b16_p16_224-80ecf9dd.pth', # noqa
5 | backbone=dict(
6 | type='VisionTransformer',
7 | img_size=224,
8 | embed_dims=768,
9 | num_layers=12,
10 | num_heads=12,
11 | out_indices=(2, 5, 8, 11),
12 | final_norm=False,
13 | with_cls_token=True,
14 | output_cls_token=True),
15 | decode_head=dict(
16 | type='DPTHead',
17 | in_channels=(768, 768, 768, 768),
18 | channels=256,
19 | embed_dims=768,
20 | post_process_channels=[96, 192, 384, 768],
21 | num_classes=150,
22 | readout_type='project',
23 | input_transform='multiple_select',
24 | in_index=(0, 1, 2, 3),
25 | norm_cfg=norm_cfg,
26 | loss_decode=dict(
27 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
28 | auxiliary_head=None,
29 | # model training and testing settings
30 | train_cfg=dict(),
31 | test_cfg=dict(mode='whole')) # yapf: disable
32 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/erfnet_fcn.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | norm_cfg = dict(type='SyncBN', requires_grad=True)
3 | model = dict(
4 | type='EncoderDecoder',
5 | pretrained=None,
6 | backbone=dict(
7 | type='ERFNet',
8 | in_channels=3,
9 | enc_downsample_channels=(16, 64, 128),
10 | enc_stage_non_bottlenecks=(5, 8),
11 | enc_non_bottleneck_dilations=(2, 4, 8, 16),
12 | enc_non_bottleneck_channels=(64, 128),
13 | dec_upsample_channels=(64, 16),
14 | dec_stages_non_bottleneck=(2, 2),
15 | dec_non_bottleneck_channels=(64, 16),
16 | dropout_ratio=0.1,
17 | init_cfg=None),
18 | decode_head=dict(
19 | type='FCNHead',
20 | in_channels=16,
21 | channels=128,
22 | num_convs=1,
23 | concat_input=False,
24 | dropout_ratio=0.1,
25 | num_classes=19,
26 | norm_cfg=norm_cfg,
27 | align_corners=False,
28 | loss_decode=dict(
29 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
30 | # model training and testing settings
31 | train_cfg=dict(),
32 | test_cfg=dict(mode='whole'))
33 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/fpn_r50.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | norm_cfg = dict(type='SyncBN', requires_grad=True)
3 | model = dict(
4 | type='EncoderDecoder',
5 | pretrained='open-mmlab://resnet50_v1c',
6 | backbone=dict(
7 | type='ResNetV1c',
8 | depth=50,
9 | num_stages=4,
10 | out_indices=(0, 1, 2, 3),
11 | dilations=(1, 1, 1, 1),
12 | strides=(1, 2, 2, 2),
13 | norm_cfg=norm_cfg,
14 | norm_eval=False,
15 | style='pytorch',
16 | contract_dilation=True),
17 | neck=dict(
18 | type='FPN',
19 | in_channels=[256, 512, 1024, 2048],
20 | out_channels=256,
21 | num_outs=4),
22 | decode_head=dict(
23 | type='FPNHead',
24 | in_channels=[256, 256, 256, 256],
25 | in_index=[0, 1, 2, 3],
26 | feature_strides=[4, 8, 16, 32],
27 | channels=128,
28 | dropout_ratio=0.1,
29 | num_classes=19,
30 | norm_cfg=norm_cfg,
31 | align_corners=False,
32 | loss_decode=dict(
33 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
34 | # model training and testing settings
35 | train_cfg=dict(),
36 | test_cfg=dict(mode='whole'))
37 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/lraspp_m-v3-d8.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
3 | model = dict(
4 | type='EncoderDecoder',
5 | backbone=dict(
6 | type='MobileNetV3',
7 | arch='large',
8 | out_indices=(1, 3, 16),
9 | norm_cfg=norm_cfg),
10 | decode_head=dict(
11 | type='LRASPPHead',
12 | in_channels=(16, 24, 960),
13 | in_index=(0, 1, 2),
14 | channels=128,
15 | input_transform='multiple_select',
16 | dropout_ratio=0.1,
17 | num_classes=19,
18 | norm_cfg=norm_cfg,
19 | act_cfg=dict(type='ReLU'),
20 | align_corners=False,
21 | loss_decode=dict(
22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
23 | # model training and testing settings
24 | train_cfg=dict(),
25 | test_cfg=dict(mode='whole'))
26 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/segformer_mit-b0.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | norm_cfg = dict(type='SyncBN', requires_grad=True)
3 | model = dict(
4 | type='EncoderDecoder',
5 | pretrained=None,
6 | backbone=dict(
7 | type='MixVisionTransformer',
8 | in_channels=3,
9 | embed_dims=32,
10 | num_stages=4,
11 | num_layers=[2, 2, 2, 2],
12 | num_heads=[1, 2, 5, 8],
13 | patch_sizes=[7, 3, 3, 3],
14 | sr_ratios=[8, 4, 2, 1],
15 | out_indices=(0, 1, 2, 3),
16 | mlp_ratio=4,
17 | qkv_bias=True,
18 | drop_rate=0.0,
19 | attn_drop_rate=0.0,
20 | drop_path_rate=0.1),
21 | decode_head=dict(
22 | type='SegformerHead',
23 | in_channels=[32, 64, 160, 256],
24 | in_index=[0, 1, 2, 3],
25 | channels=256,
26 | dropout_ratio=0.1,
27 | num_classes=19,
28 | norm_cfg=norm_cfg,
29 | align_corners=False,
30 | loss_decode=dict(
31 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
32 | # model training and testing settings
33 | train_cfg=dict(),
34 | test_cfg=dict(mode='whole'))
35 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/models/segmenter_vit-b16_mask.py:
--------------------------------------------------------------------------------
1 | checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_base_p16_384_20220308-96dfe169.pth' # noqa
2 | # model settings
3 | backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
4 | model = dict(
5 | type='EncoderDecoder',
6 | pretrained=checkpoint,
7 | backbone=dict(
8 | type='VisionTransformer',
9 | img_size=(512, 512),
10 | patch_size=16,
11 | in_channels=3,
12 | embed_dims=768,
13 | num_layers=12,
14 | num_heads=12,
15 | drop_path_rate=0.1,
16 | attn_drop_rate=0.0,
17 | drop_rate=0.0,
18 | final_norm=True,
19 | norm_cfg=backbone_norm_cfg,
20 | with_cls_token=True,
21 | interpolate_mode='bicubic',
22 | ),
23 | decode_head=dict(
24 | type='SegmenterMaskTransformerHead',
25 | in_channels=768,
26 | channels=768,
27 | num_classes=150,
28 | num_layers=2,
29 | num_heads=12,
30 | embed_dims=768,
31 | dropout_ratio=0.0,
32 | loss_decode=dict(
33 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
34 | ),
35 | test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(480, 480)),
36 | )
37 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/schedules/schedule_160k.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3 | optimizer_config = dict()
4 | # learning policy
5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6 | # runtime settings
7 | runner = dict(type='IterBasedRunner', max_iters=160000)
8 | checkpoint_config = dict(by_epoch=False, interval=16000)
9 | evaluation = dict(interval=16000, metric='mIoU', pre_eval=True)
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/schedules/schedule_20k.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3 | optimizer_config = dict()
4 | # learning policy
5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6 | # runtime settings
7 | runner = dict(type='IterBasedRunner', max_iters=20000)
8 | checkpoint_config = dict(by_epoch=False, interval=2000)
9 | evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/schedules/schedule_320k.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3 | optimizer_config = dict()
4 | # learning policy
5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6 | # runtime settings
7 | runner = dict(type='IterBasedRunner', max_iters=320000)
8 | checkpoint_config = dict(by_epoch=False, interval=32000)
9 | evaluation = dict(interval=32000, metric='mIoU')
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/schedules/schedule_40k.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3 | optimizer_config = dict()
4 | # learning policy
5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6 | # runtime settings
7 | runner = dict(type='IterBasedRunner', max_iters=40000)
8 | checkpoint_config = dict(by_epoch=False, interval=4000)
9 | evaluation = dict(interval=4000, metric='mIoU', pre_eval=True)
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/configs/_base_/schedules/schedule_80k.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3 | optimizer_config = dict()
4 | # learning policy
5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6 | # runtime settings
7 | runner = dict(type='IterBasedRunner', max_iters=80000)
8 | checkpoint_config = dict(by_epoch=False, interval=8000)
9 | evaluation = dict(interval=8000, metric='mIoU', pre_eval=True)
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/demo/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/demo/demo.png
--------------------------------------------------------------------------------
/mmseg-v0.28/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.11.0"
2 | ARG CUDA="11.3"
3 | ARG CUDNN="8"
4 |
5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
6 |
7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
10 |
11 | # To fix GPG key error when running apt-get update
12 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
13 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
14 |
15 | RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx \
16 | && apt-get clean \
17 | && rm -rf /var/lib/apt/lists/*
18 |
19 | RUN conda clean --all
20 |
21 | # Install MMCV
22 | ARG PYTORCH
23 | ARG CUDA
24 | ARG MMCV
25 | RUN ["/bin/bash", "-c", "pip install --no-cache-dir mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html"]
26 |
27 | # Install MMSegmentation
28 | RUN git clone https://github.com/open-mmlab/mmsegmentation.git /mmsegmentation
29 | WORKDIR /mmsegmentation
30 | ENV FORCE_CUDA="1"
31 | RUN pip install -r requirements.txt
32 | RUN pip install --no-cache-dir -e .
33 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docker/serve/config.properties:
--------------------------------------------------------------------------------
1 | inference_address=http://0.0.0.0:8080
2 | management_address=http://0.0.0.0:8081
3 | metrics_address=http://0.0.0.0:8082
4 | model_store=/home/model-server/model-store
5 | load_models=all
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docker/serve/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [[ "$1" = "serve" ]]; then
5 | shift 1
6 | torchserve --start --ts-config /home/model-server/config.properties
7 | else
8 | eval "$@"
9 | fi
10 |
11 | # prevent docker exit
12 | tail -f /dev/null
13 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../images/mmsegmentation.png");
3 | background-size: 201px 40px;
4 | height: 40px;
5 | width: 201px;
6 | }
7 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/_static/images/mmsegmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/docs/en/_static/images/mmsegmentation.png
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/api.rst:
--------------------------------------------------------------------------------
1 | mmseg.apis
2 | --------------
3 | .. automodule:: mmseg.apis
4 | :members:
5 |
6 | mmseg.core
7 | --------------
8 |
9 | seg
10 | ^^^^^^^^^^
11 | .. automodule:: mmseg.core.seg
12 | :members:
13 |
14 | evaluation
15 | ^^^^^^^^^^
16 | .. automodule:: mmseg.core.evaluation
17 | :members:
18 |
19 | utils
20 | ^^^^^^^^^^
21 | .. automodule:: mmseg.core.utils
22 | :members:
23 |
24 | mmseg.datasets
25 | --------------
26 |
27 | datasets
28 | ^^^^^^^^^^
29 | .. automodule:: mmseg.datasets
30 | :members:
31 |
32 | pipelines
33 | ^^^^^^^^^^
34 | .. automodule:: mmseg.datasets.pipelines
35 | :members:
36 |
37 | mmseg.models
38 | --------------
39 |
40 | segmentors
41 | ^^^^^^^^^^
42 | .. automodule:: mmseg.models.segmentors
43 | :members:
44 |
45 | backbones
46 | ^^^^^^^^^^
47 | .. automodule:: mmseg.models.backbones
48 | :members:
49 |
50 | decode_heads
51 | ^^^^^^^^^^^^
52 | .. automodule:: mmseg.models.decode_heads
53 | :members:
54 |
55 | losses
56 | ^^^^^^^^^^
57 | .. automodule:: mmseg.models.losses
58 | :members:
59 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMSegmentation's documentation!
2 | ===========================================
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 | :caption: Get Started
7 |
8 | get_started.md
9 |
10 | .. toctree::
11 | :maxdepth: 1
12 | :caption: Dataset Preparation
13 |
14 | dataset_prepare.md
15 |
16 | .. toctree::
17 | :maxdepth: 1
18 | :caption: Model Zoo
19 |
20 | model_zoo.md
21 | modelzoo_statistics.md
22 |
23 | .. toctree::
24 | :maxdepth: 2
25 | :caption: Quick Run
26 |
27 | train.md
28 | inference.md
29 |
30 | .. toctree::
31 | :maxdepth: 2
32 | :caption: Tutorials
33 |
34 | tutorials/index.rst
35 |
36 | .. toctree::
37 | :maxdepth: 2
38 | :caption: Useful Tools and Scripts
39 |
40 | useful_tools.md
41 |
42 | .. toctree::
43 | :maxdepth: 2
44 | :caption: Notes
45 |
46 | changelog.md
47 | faq.md
48 |
49 | .. toctree::
50 | :caption: Switch Language
51 |
52 | switch_language.md
53 |
54 | .. toctree::
55 | :caption: API Reference
56 |
57 | api.rst
58 |
59 | Indices and tables
60 | ==================
61 |
62 | * :ref:`genindex`
63 | * :ref:`search`
64 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/en/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 2
3 |
4 | config.md
5 | customize_datasets.md
6 | data_pipeline.md
7 | customize_models.md
8 | training_tricks.md
9 | customize_runtime.md
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../images/mmsegmentation.png");
3 | background-size: 201px 40px;
4 | height: 40px;
5 | width: 201px;
6 | }
7 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/_static/images/mmsegmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/docs/zh_cn/_static/images/mmsegmentation.png
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/api.rst:
--------------------------------------------------------------------------------
1 | mmseg.apis
2 | --------------
3 | .. automodule:: mmseg.apis
4 | :members:
5 |
6 | mmseg.core
7 | --------------
8 |
9 | seg
10 | ^^^^^^^^^^
11 | .. automodule:: mmseg.core.seg
12 | :members:
13 |
14 | evaluation
15 | ^^^^^^^^^^
16 | .. automodule:: mmseg.core.evaluation
17 | :members:
18 |
19 | utils
20 | ^^^^^^^^^^
21 | .. automodule:: mmseg.core.utils
22 | :members:
23 |
24 | mmseg.datasets
25 | --------------
26 |
27 | datasets
28 | ^^^^^^^^^^
29 | .. automodule:: mmseg.datasets
30 | :members:
31 |
32 | pipelines
33 | ^^^^^^^^^^
34 | .. automodule:: mmseg.datasets.pipelines
35 | :members:
36 |
37 | mmseg.models
38 | --------------
39 |
40 | segmentors
41 | ^^^^^^^^^^
42 | .. automodule:: mmseg.models.segmentors
43 | :members:
44 |
45 | backbones
46 | ^^^^^^^^^^
47 | .. automodule:: mmseg.models.backbones
48 | :members:
49 |
50 | decode_heads
51 | ^^^^^^^^^^^^
52 | .. automodule:: mmseg.models.decode_heads
53 | :members:
54 |
55 | losses
56 | ^^^^^^^^^^
57 | .. automodule:: mmseg.models.losses
58 | :members:
59 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/imgs/qq_group_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/docs/zh_cn/imgs/qq_group_qrcode.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/imgs/zhihu_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/docs/zh_cn/imgs/zhihu_qrcode.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/index.rst:
--------------------------------------------------------------------------------
1 | 欢迎来到 MMSegmentation 的文档!
2 | =======================================
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 | :caption: 开始你的第一步
7 |
8 | get_started.md
9 |
10 | .. toctree::
11 | :maxdepth: 1
12 | :caption: 数据集准备
13 |
14 | dataset_prepare.md
15 |
16 | .. toctree::
17 | :maxdepth: 1
18 | :caption: 模型库
19 |
20 | model_zoo.md
21 | modelzoo_statistics.md
22 |
23 | .. toctree::
24 | :maxdepth: 2
25 | :caption: 快速启动
26 |
27 | train.md
28 | inference.md
29 |
30 | .. toctree::
31 | :maxdepth: 2
32 | :caption: 教程
33 |
34 | tutorials/index.rst
35 |
36 | .. toctree::
37 | :maxdepth: 2
38 | :caption: 实用工具与脚本
39 |
40 | useful_tools.md
41 |
42 | .. toctree::
43 | :maxdepth: 2
44 | :caption: 说明
45 |
46 | changelog.md
47 | faq.md
48 |
49 | .. toctree::
50 | :caption: 语言切换
51 |
52 | switch_language.md
53 |
54 | .. toctree::
55 | :caption: 接口文档(英文)
56 |
57 | api.rst
58 |
59 | Indices and tables
60 | ==================
61 |
62 | * :ref:`genindex`
63 | * :ref:`search`
64 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/mmseg-v0.28/docs/zh_cn/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 2
3 |
4 | config.md
5 | customize_datasets.md
6 | data_pipeline.md
7 | customize_models.md
8 | training_tricks.md
9 | customize_runtime.md
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/apis/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .inference import inference_segmentor, init_segmentor, show_result_pyplot
3 | from .test import multi_gpu_test, single_gpu_test
4 | from .train import (get_root_logger, init_random_seed, set_random_seed,
5 | train_segmentor)
6 |
7 | __all__ = [
8 | 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
9 | 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
10 | 'show_result_pyplot', 'init_random_seed'
11 | ]
12 |
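The helpers exported above are the public single-image inference entry points. A minimal usage sketch, assuming a config from this repo plus a locally downloaded checkpoint; the checkpoint filename and `demo.jpg` are illustrative:

    from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot

    config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
    checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes.pth'  # hypothetical local path
    model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
    result = inference_segmentor(model, 'demo.jpg')  # list with one (H, W) label map
    show_result_pyplot(model, 'demo.jpg', result)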
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import (OPTIMIZER_BUILDERS, build_optimizer,
3 | build_optimizer_constructor)
4 | from .evaluation import * # noqa: F401, F403
5 | from .hook import * # noqa: F401, F403
6 | from .optimizers import * # noqa: F401, F403
7 | from .seg import * # noqa: F401, F403
8 | from .utils import * # noqa: F401, F403
9 |
10 | __all__ = [
11 | 'OPTIMIZER_BUILDERS', 'build_optimizer', 'build_optimizer_constructor'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import copy
3 |
4 | from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS
5 | from mmcv.utils import Registry, build_from_cfg
6 |
7 | OPTIMIZER_BUILDERS = Registry(
8 | 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS)
9 |
10 |
11 | def build_optimizer_constructor(cfg):
12 | constructor_type = cfg.get('type')
13 | if constructor_type in OPTIMIZER_BUILDERS:
14 | return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
15 | elif constructor_type in MMCV_OPTIMIZER_BUILDERS:
16 | return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS)
17 | else:
18 | raise KeyError(f'{constructor_type} is not registered '
19 | 'in the optimizer builder registry.')
20 |
21 |
22 | def build_optimizer(model, cfg):
23 | optimizer_cfg = copy.deepcopy(cfg)
24 | constructor_type = optimizer_cfg.pop('constructor',
25 | 'DefaultOptimizerConstructor')
26 | paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
27 | optim_constructor = build_optimizer_constructor(
28 | dict(
29 | type=constructor_type,
30 | optimizer_cfg=optimizer_cfg,
31 | paramwise_cfg=paramwise_cfg))
32 | optimizer = optim_constructor(model)
33 | return optimizer
34 |
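A minimal sketch of how `build_optimizer` is called, assuming mmcv's `DefaultOptimizerConstructor` and a toy torch module; the hyper-parameters and `custom_keys` below are illustrative:

    import torch.nn as nn

    from mmseg.core import build_optimizer

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    optimizer_cfg = dict(
        type='AdamW', lr=6e-5, weight_decay=0.01,
        # 'constructor' and 'paramwise_cfg' are popped off before the torch optimizer is built
        constructor='DefaultOptimizerConstructor',
        paramwise_cfg=dict(custom_keys={'bias': dict(decay_mult=0.)}))
    optimizer = build_optimizer(model, optimizer_cfg)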
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .class_names import get_classes, get_palette
3 | from .eval_hooks import DistEvalHook, EvalHook
4 | from .metrics import (eval_metrics, intersect_and_union, mean_dice,
5 | mean_fscore, mean_iou, pre_eval_to_metrics)
6 |
7 | __all__ = [
8 | 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
9 | 'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics',
10 | 'intersect_and_union'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/hook/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .wandblogger_hook import MMSegWandbHook
3 |
4 | __all__ = ['MMSegWandbHook']
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/optimizers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .layer_decay_optimizer_constructor import (
3 | LayerDecayOptimizerConstructor, LearningRateDecayOptimizerConstructor)
4 |
5 | __all__ = [
6 | 'LearningRateDecayOptimizerConstructor', 'LayerDecayOptimizerConstructor'
7 | ]
8 |
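Both constructors are selected by name from the optimizer config; a sketch of a config requesting stage-wise learning-rate decay (the decay rate and layer count are illustrative, in the style of common mmseg configs):

    optimizer = dict(
        type='AdamW', lr=6e-5, betas=(0.9, 0.999), weight_decay=0.05,
        constructor='LearningRateDecayOptimizerConstructor',
        paramwise_cfg=dict(decay_rate=0.9, decay_type='stage_wise', num_layers=12))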
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/seg/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import build_pixel_sampler
3 | from .sampler import BasePixelSampler, OHEMPixelSampler
4 |
5 | __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/seg/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import Registry, build_from_cfg
3 |
4 | PIXEL_SAMPLERS = Registry('pixel sampler')
5 |
6 |
7 | def build_pixel_sampler(cfg, **default_args):
8 | """Build pixel sampler for segmentation map."""
9 | return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
10 |
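A minimal sketch of the builder call; in practice the decode head passes itself as `context` through `default_args`, and the OHEM thresholds below are illustrative:

    from mmseg.core.seg import build_pixel_sampler

    sampler_cfg = dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000)
    sampler = build_pixel_sampler(sampler_cfg, context=None)  # context is normally the decode head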
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/seg/sampler/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_pixel_sampler import BasePixelSampler
3 | from .ohem_pixel_sampler import OHEMPixelSampler
4 |
5 | __all__ = ['BasePixelSampler', 'OHEMPixelSampler']
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/seg/sampler/base_pixel_sampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from abc import ABCMeta, abstractmethod
3 |
4 |
5 | class BasePixelSampler(metaclass=ABCMeta):
6 | """Base class of pixel sampler."""
7 |
8 | def __init__(self, **kwargs):
9 | pass
10 |
11 | @abstractmethod
12 | def sample(self, seg_logit, seg_label):
13 | """Placeholder for sample function."""
14 |
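A hypothetical subclass, only to illustrate the contract `sample()` is expected to fulfil (return a per-pixel weight map matching the spatial size of the labels); the class name and thresholding rule are invented for the sketch:

    import torch

    from mmseg.core.seg.builder import PIXEL_SAMPLERS
    from mmseg.core.seg.sampler import BasePixelSampler


    @PIXEL_SAMPLERS.register_module()
    class LowConfidencePixelSampler(BasePixelSampler):
        """Keep only pixels whose predicted confidence is below ``thresh``."""

        def __init__(self, thresh=0.7, **kwargs):
            super().__init__(**kwargs)
            self.thresh = thresh

        def sample(self, seg_logit, seg_label):
            # seg_logit: (N, C, H, W) logits; seg_label: (N, 1, H, W) labels
            with torch.no_grad():
                conf = seg_logit.softmax(dim=1).max(dim=1)[0]  # (N, H, W)
                return (conf < self.thresh).float()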
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dist_util import check_dist_init, sync_random_seed
3 | from .misc import add_prefix
4 |
5 | __all__ = ['add_prefix', 'check_dist_init', 'sync_random_seed']
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/core/utils/misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | def add_prefix(inputs, prefix):
3 | """Add prefix for dict.
4 |
5 | Args:
6 | inputs (dict): The input dict with str keys.
7 | prefix (str): The prefix to add.
8 |
9 | Returns:
10 |
11 | dict: The dict with keys updated with ``prefix``.
12 | """
13 |
14 | outputs = dict()
15 | for name, value in inputs.items():
16 | outputs[f'{prefix}.{name}'] = value
17 |
18 | return outputs
19 |
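The encoder-decoder segmentors use this helper to keep the decode-head and auxiliary-head loss keys apart in one log dict; a quick sketch:

    from mmseg.core.utils import add_prefix

    losses = add_prefix({'loss_ce': 0.4, 'acc_seg': 0.91}, 'decode')
    # -> {'decode.loss_ce': 0.4, 'decode.acc_seg': 0.91}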
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .ade import ADE20KDataset
3 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
4 | from .chase_db1 import ChaseDB1Dataset
5 | from .cityscapes import CityscapesDataset
6 | from .coco_stuff import COCOStuffDataset
7 | from .custom import CustomDataset
8 | from .dark_zurich import DarkZurichDataset
9 | from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
10 | RepeatDataset)
11 | from .drive import DRIVEDataset
12 | from .hrf import HRFDataset
13 | from .isaid import iSAIDDataset
14 | from .isprs import ISPRSDataset
15 | from .loveda import LoveDADataset
16 | from .night_driving import NightDrivingDataset
17 | from .pascal_context import PascalContextDataset, PascalContextDataset59
18 | from .potsdam import PotsdamDataset
19 | from .stare import STAREDataset
20 | from .voc import PascalVOCDataset
21 |
22 | __all__ = [
23 | 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
24 | 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
25 | 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
26 | 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
27 | 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset',
28 | 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset',
29 | 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset'
30 | ]
31 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/chase_db1.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | from .builder import DATASETS
4 | from .custom import CustomDataset
5 |
6 |
7 | @DATASETS.register_module()
8 | class ChaseDB1Dataset(CustomDataset):
9 | """Chase_db1 dataset.
10 |
11 | In segmentation map annotation for Chase_db1, 0 stands for background,
12 | which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
13 | The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
14 | '_1stHO.png'.
15 | """
16 |
17 | CLASSES = ('background', 'vessel')
18 |
19 | PALETTE = [[120, 120, 120], [6, 230, 230]]
20 |
21 | def __init__(self, **kwargs):
22 | super(ChaseDB1Dataset, self).__init__(
23 | img_suffix='.png',
24 | seg_map_suffix='_1stHO.png',
25 | reduce_zero_label=False,
26 | **kwargs)
27 | assert self.file_client.exists(self.img_dir)
28 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/dark_zurich.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import DATASETS
3 | from .cityscapes import CityscapesDataset
4 |
5 |
6 | @DATASETS.register_module()
7 | class DarkZurichDataset(CityscapesDataset):
8 | """DarkZurichDataset dataset."""
9 |
10 | def __init__(self, **kwargs):
11 | super().__init__(
12 | img_suffix='_rgb_anon.png',
13 | seg_map_suffix='_gt_labelTrainIds.png',
14 | **kwargs)
15 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/drive.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | from .builder import DATASETS
4 | from .custom import CustomDataset
5 |
6 |
7 | @DATASETS.register_module()
8 | class DRIVEDataset(CustomDataset):
9 | """DRIVE dataset.
10 |
11 | In segmentation map annotation for DRIVE, 0 stands for background, which is
12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The
13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
14 | '_manual1.png'.
15 | """
16 |
17 | CLASSES = ('background', 'vessel')
18 |
19 | PALETTE = [[120, 120, 120], [6, 230, 230]]
20 |
21 | def __init__(self, **kwargs):
22 | super(DRIVEDataset, self).__init__(
23 | img_suffix='.png',
24 | seg_map_suffix='_manual1.png',
25 | reduce_zero_label=False,
26 | **kwargs)
27 | assert self.file_client.exists(self.img_dir)
28 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/hrf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | from .builder import DATASETS
4 | from .custom import CustomDataset
5 |
6 |
7 | @DATASETS.register_module()
8 | class HRFDataset(CustomDataset):
9 | """HRF dataset.
10 |
11 | In segmentation map annotation for HRF, 0 stands for background, which is
12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The
13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
14 | '.png'.
15 | """
16 |
17 | CLASSES = ('background', 'vessel')
18 |
19 | PALETTE = [[120, 120, 120], [6, 230, 230]]
20 |
21 | def __init__(self, **kwargs):
22 | super(HRFDataset, self).__init__(
23 | img_suffix='.png',
24 | seg_map_suffix='.png',
25 | reduce_zero_label=False,
26 | **kwargs)
27 | assert self.file_client.exists(self.img_dir)
28 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/isprs.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import DATASETS
3 | from .custom import CustomDataset
4 |
5 |
6 | @DATASETS.register_module()
7 | class ISPRSDataset(CustomDataset):
8 | """ISPRS dataset.
9 |
10 | In segmentation map annotation for ISPRS, 0 is the ignore index.
11 | ``reduce_zero_label`` should be set to True. The ``img_suffix`` and
12 | ``seg_map_suffix`` are both fixed to '.png'.
13 | """
14 | CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree',
15 | 'car', 'clutter')
16 |
17 | PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
18 | [255, 255, 0], [255, 0, 0]]
19 |
20 | def __init__(self, **kwargs):
21 | super(ISPRSDataset, self).__init__(
22 | img_suffix='.png',
23 | seg_map_suffix='.png',
24 | reduce_zero_label=True,
25 | **kwargs)
26 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/night_driving.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import DATASETS
3 | from .cityscapes import CityscapesDataset
4 |
5 |
6 | @DATASETS.register_module()
7 | class NightDrivingDataset(CityscapesDataset):
8 | """NightDrivingDataset dataset."""
9 |
10 | def __init__(self, **kwargs):
11 | super().__init__(
12 | img_suffix='_leftImg8bit.png',
13 | seg_map_suffix='_gtCoarse_labelTrainIds.png',
14 | **kwargs)
15 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/pipelines/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .compose import Compose
3 | from .formatting import (Collect, ImageToTensor, ToDataContainer, ToTensor,
4 | Transpose, to_tensor)
5 | from .loading import LoadAnnotations, LoadImageFromFile
6 | from .test_time_aug import MultiScaleFlipAug
7 | from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
8 | PhotoMetricDistortion, RandomCrop, RandomCutOut,
9 | RandomFlip, RandomMosaic, RandomRotate, Rerange,
10 | Resize, RGB2Gray, SegRescale)
11 |
12 | __all__ = [
13 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
14 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
15 | 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
16 | 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
17 | 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut',
18 | 'RandomMosaic'
19 | ]
20 |
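These transforms are composed from config dicts; a sketch of a typical training pipeline as it would appear in a dataset config (image scale, crop size and normalization statistics are illustrative):

    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations'),
        dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
        dict(type='RandomCrop', crop_size=(512, 512), cat_max_ratio=0.75),
        dict(type='RandomFlip', prob=0.5),
        dict(type='PhotoMetricDistortion'),
        dict(type='Normalize', mean=[123.675, 116.28, 103.53],
             std=[58.395, 57.12, 57.375], to_rgb=True),
        dict(type='Pad', size=(512, 512), pad_val=0, seg_pad_val=255),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_semantic_seg']),
    ]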
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/pipelines/formating.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | # flake8: noqa
3 | import warnings
4 |
5 | from .formatting import *
6 |
7 | warnings.warn('DeprecationWarning: mmseg.datasets.pipelines.formating will be '
8 | 'deprecated in 2021, please replace it with '
9 | 'mmseg.datasets.pipelines.formatting.')
10 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/potsdam.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import DATASETS
3 | from .custom import CustomDataset
4 |
5 |
6 | @DATASETS.register_module()
7 | class PotsdamDataset(CustomDataset):
8 | """ISPRS Potsdam dataset.
9 |
10 | In segmentation map annotation for Potsdam dataset, 0 is the ignore index.
11 | ``reduce_zero_label`` should be set to True. The ``img_suffix`` and
12 | ``seg_map_suffix`` are both fixed to '.png'.
13 | """
14 | CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree',
15 | 'car', 'clutter')
16 |
17 | PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
18 | [255, 255, 0], [255, 0, 0]]
19 |
20 | def __init__(self, **kwargs):
21 | super(PotsdamDataset, self).__init__(
22 | img_suffix='.png',
23 | seg_map_suffix='.png',
24 | reduce_zero_label=True,
25 | **kwargs)
26 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/samplers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .distributed_sampler import DistributedSampler
3 |
4 | __all__ = ['DistributedSampler']
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/stare.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os.path as osp
3 |
4 | from .builder import DATASETS
5 | from .custom import CustomDataset
6 |
7 |
8 | @DATASETS.register_module()
9 | class STAREDataset(CustomDataset):
10 | """STARE dataset.
11 |
12 | In segmentation map annotation for STARE, 0 stands for background, which is
13 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The
14 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
15 | '.ah.png'.
16 | """
17 |
18 | CLASSES = ('background', 'vessel')
19 |
20 | PALETTE = [[120, 120, 120], [6, 230, 230]]
21 |
22 | def __init__(self, **kwargs):
23 | super(STAREDataset, self).__init__(
24 | img_suffix='.png',
25 | seg_map_suffix='.ah.png',
26 | reduce_zero_label=False,
27 | **kwargs)
28 | assert osp.exists(self.img_dir)
29 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/datasets/voc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os.path as osp
3 |
4 | from .builder import DATASETS
5 | from .custom import CustomDataset
6 |
7 |
8 | @DATASETS.register_module()
9 | class PascalVOCDataset(CustomDataset):
10 | """Pascal VOC dataset.
11 |
12 | Args:
13 | split (str): Split txt file for Pascal VOC.
14 | """
15 |
16 | CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
17 | 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
18 | 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
19 | 'train', 'tvmonitor')
20 |
21 | PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
22 | [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
23 | [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
24 | [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
25 | [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
26 |
27 | def __init__(self, split, **kwargs):
28 | super(PascalVOCDataset, self).__init__(
29 | img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
30 | assert osp.exists(self.img_dir) and self.split is not None
31 |
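The `split` argument points at a text file listing sample ids; a sketch of the matching data config, assuming the standard VOC2012 directory layout (the pipeline is shortened for brevity):

    train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations')]
    data = dict(
        samples_per_gpu=4,
        workers_per_gpu=4,
        train=dict(
            type='PascalVOCDataset',
            data_root='data/VOCdevkit/VOC2012',
            img_dir='JPEGImages',
            ann_dir='SegmentationClass',
            split='ImageSets/Segmentation/train.txt',
            pipeline=train_pipeline))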
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .backbones import * # noqa: F401,F403
3 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
4 | build_head, build_loss, build_segmentor)
5 | from .decode_heads import * # noqa: F401,F403
6 | from .losses import * # noqa: F401,F403
7 | from .necks import * # noqa: F401,F403
8 | from .segmentors import * # noqa: F401,F403
9 |
10 | __all__ = [
11 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
12 | 'build_head', 'build_loss', 'build_segmentor'
13 | ]
14 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .beit import BEiT
3 | from .bisenetv1 import BiSeNetV1
4 | from .bisenetv2 import BiSeNetV2
5 | from .cgnet import CGNet
6 | from .erfnet import ERFNet
7 | from .fast_scnn import FastSCNN
8 | from .hrnet import HRNet
9 | from .icnet import ICNet
10 | from .mae import MAE
11 | from .mit import MixVisionTransformer
12 | from .mobilenet_v2 import MobileNetV2
13 | from .mobilenet_v3 import MobileNetV3
14 | from .resnest import ResNeSt
15 | from .resnet import ResNet, ResNetV1c, ResNetV1d
16 | from .resnext import ResNeXt
17 | from .stdc import STDCContextPathNet, STDCNet
18 | from .swin import SwinTransformer
19 | from .timm_backbone import TIMMBackbone
20 | from .twins import PCPVT, SVT
21 | from .unet import UNet
22 | from .vit import VisionTransformer
23 | from .vit_beit import ViTBEiT
24 |
25 | __all__ = [
26 | 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN',
27 | 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3',
28 | 'VisionTransformer', 'SwinTransformer', 'MixVisionTransformer',
29 | 'BiSeNetV1', 'BiSeNetV2', 'ICNet', 'TIMMBackbone', 'ERFNet', 'PCPVT',
30 | 'SVT', 'STDCNet', 'STDCContextPathNet', 'BEiT', 'MAE', 'ViTBEiT',
31 | ]
32 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .accuracy import Accuracy, accuracy
3 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
4 | cross_entropy, mask_cross_entropy)
5 | from .dice_loss import DiceLoss
6 | from .focal_loss import FocalLoss
7 | from .lovasz_loss import LovaszLoss
8 | from .tversky_loss import TverskyLoss
9 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss
10 |
11 | __all__ = [
12 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
13 | 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
14 | 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss',
15 | 'FocalLoss', 'TverskyLoss'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .featurepyramid import Feature2Pyramid
3 | from .fpn import FPN
4 | from .ic_neck import ICNeck
5 | from .jpu import JPU
6 | from .mla_neck import MLANeck
7 | from .multilevel_neck import MultiLevelNeck
8 |
9 | __all__ = [
10 | 'FPN', 'MultiLevelNeck', 'MLANeck', 'ICNeck', 'JPU', 'Feature2Pyramid'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base import BaseSegmentor
3 | from .cascade_encoder_decoder import CascadeEncoderDecoder
4 | from .encoder_decoder import EncoderDecoder
5 |
6 | __all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
7 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .embed import PatchEmbed
3 | from .inverted_residual import InvertedResidual, InvertedResidualV3
4 | from .make_divisible import make_divisible
5 | from .res_layer import ResLayer
6 | from .se_layer import SELayer
7 | from .self_attention_block import SelfAttentionBlock
8 | from .shape_convert import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc,
9 | nlc_to_nchw)
10 | from .up_conv_block import UpConvBlock
11 |
12 | __all__ = [
13 | 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual',
14 | 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'PatchEmbed',
15 | 'nchw_to_nlc', 'nlc_to_nchw', 'nchw2nlc2nchw', 'nlc2nchw2nlc'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/models/utils/make_divisible.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
3 | """Make divisible function.
4 |
5 | This function rounds the channel number to the nearest value that can be
6 | divisible by the divisor. It is taken from the original tf repo. It ensures
7 | that all layers have a channel number that is divisible by divisor. It can
8 | be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa
9 |
10 | Args:
11 | value (int): The original channel number.
12 | divisor (int): The divisor to fully divide the channel number.
13 | min_value (int): The minimum value of the output channel.
14 | Default: None, which means the minimum value equals the divisor.
15 | min_ratio (float): The minimum ratio of the rounded channel number to
16 | the original channel number. Default: 0.9.
17 |
18 | Returns:
19 | int: The modified output channel number.
20 | """
21 |
22 | if min_value is None:
23 | min_value = divisor
24 | new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
25 | # Make sure that round down does not go down by more than (1-min_ratio).
26 | if new_value < min_ratio * value:
27 | new_value += divisor
28 | return new_value
29 |
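A few worked values make the behaviour concrete, including the `min_ratio` guard that stops a channel count from being rounded down by more than 10%:

    from mmseg.models.utils import make_divisible

    make_divisible(32, 8)  # 32: already a multiple of 8
    make_divisible(37, 8)  # 40: rounded to the nearest multiple of 8
    make_divisible(10, 8)  # 16: plain rounding gives 8, below 0.9 * 10, so one divisor is added back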
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/ops/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .encoding import Encoding
3 | from .wrappers import Upsample, resize
4 |
5 | __all__ = ['Upsample', 'resize', 'Encoding']
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .collect_env import collect_env
3 | from .logger import get_root_logger
4 | from .misc import find_latest_checkpoint
5 | from .set_env import setup_multi_processes
6 | from .util_distribution import build_ddp, build_dp, get_device
7 |
8 | __all__ = [
9 | 'get_root_logger', 'collect_env', 'find_latest_checkpoint',
10 | 'setup_multi_processes', 'build_ddp', 'build_dp', 'get_device'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.utils import collect_env as collect_base_env
3 | from mmcv.utils import get_git_hash
4 |
5 | import mmseg
6 |
7 |
8 | def collect_env():
9 | """Collect the information of the running environments."""
10 | env_info = collect_base_env()
11 | env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
12 |
13 | return env_info
14 |
15 |
16 | if __name__ == '__main__':
17 | for name, val in collect_env().items():
18 | print('{}: {}'.format(name, val))
19 |
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/utils/logger.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import logging
3 |
4 | from mmcv.utils import get_logger
5 |
6 |
7 | def get_root_logger(log_file=None, log_level=logging.INFO):
8 | """Get the root logger.
9 |
10 | The logger will be initialized if it has not been initialized. By default a
11 | StreamHandler will be added. If `log_file` is specified, a FileHandler will
12 | also be added. The name of the root logger is the top-level package name,
13 | e.g., "mmseg".
14 |
15 | Args:
16 | log_file (str | None): The log filename. If specified, a FileHandler
17 | will be added to the root logger.
18 | log_level (int): The root logger level. Note that only the process of
19 | rank 0 is affected, while other processes will set the level to
20 | "Error" and be silent most of the time.
21 |
22 | Returns:
23 | logging.Logger: The root logger.
24 | """
25 |
26 | logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)
27 |
28 | return logger
29 |
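A short sketch of typical use inside a training script:

    import logging

    from mmseg.utils import get_root_logger

    logger = get_root_logger(log_level=logging.INFO)  # console only
    # passing log_file='work_dirs/run.log' would additionally attach a FileHandler
    logger.info('Environment ready, starting training.')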
--------------------------------------------------------------------------------
/mmseg-v0.28/mmseg/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Open-MMLab. All rights reserved.
2 |
3 | __version__ = '0.28.0'
4 |
5 |
6 | def parse_version_info(version_str):
7 | version_info = []
8 | for x in version_str.split('.'):
9 | if x.isdigit():
10 | version_info.append(int(x))
11 | elif x.find('rc') != -1:
12 | patch_version = x.split('rc')
13 | version_info.append(int(patch_version[0]))
14 | version_info.append(f'rc{patch_version[1]}')
15 | return tuple(version_info)
16 |
17 |
18 | version_info = parse_version_info(__version__)
19 |
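For reference, the parser keeps a release-candidate suffix as an extra element:

    from mmseg.version import parse_version_info

    parse_version_info('0.28.0')     # -> (0, 28, 0)
    parse_version_info('0.28.0rc1')  # -> (0, 28, 0, 'rc1')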
--------------------------------------------------------------------------------
/mmseg-v0.28/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --xdoctest --xdoctest-style=auto
3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs
4 |
5 | filterwarnings= default
6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning
7 | ignore:.*Define the __nice__ method for.*:Warning
8 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/optional.txt
2 | -r requirements/runtime.txt
3 | -r requirements/tests.txt
4 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | myst-parser
3 | -e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
4 | sphinx==4.0.2
5 | sphinx_copybutton
6 | sphinx_markdown_tables
7 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcls>=0.20.1
2 | mmcv-full>=1.4.4,<1.7.0
3 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/optional.txt:
--------------------------------------------------------------------------------
1 | cityscapesscripts
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
1 | mmcv
2 | prettytable
3 | torch
4 | torchvision
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/runtime.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | mmcls>=0.20.1
3 | numpy
4 | packaging
5 | prettytable
6 |
--------------------------------------------------------------------------------
/mmseg-v0.28/requirements/tests.txt:
--------------------------------------------------------------------------------
1 | codecov
2 | flake8
3 | interrogate
4 | pytest
5 | xdoctest>=0.10.0
6 | yapf
7 |
--------------------------------------------------------------------------------
/mmseg-v0.28/resources/3dogs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/resources/3dogs.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/resources/3dogs_mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/resources/3dogs_mask.png
--------------------------------------------------------------------------------
/mmseg-v0.28/resources/mmseg-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/resources/mmseg-logo.png
--------------------------------------------------------------------------------
/mmseg-v0.28/resources/seg_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/resources/seg_demo.gif
--------------------------------------------------------------------------------
/mmseg-v0.28/setup.cfg:
--------------------------------------------------------------------------------
1 | [yapf]
2 | based_on_style = pep8
3 | blank_line_before_nested_class_or_def = true
4 | split_before_expression_after_opening_paren = true
5 |
6 | [isort]
7 | line_length = 79
8 | multi_line_output = 0
9 | extra_standard_library = setuptools
10 | known_first_party = mmseg
11 | known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts
12 | no_lines_before = STDLIB,LOCALFOLDER
13 | default_section = THIRDPARTY
14 |
15 | # ignore-words-list needs to be lowercase format. For example, if we want to
16 | # ignore word "BA", then we need to append "ba" to ignore-words-list rather
17 | # than "BA"
18 | [codespell]
19 | skip = *.po,*.ts,*.ipynb
20 | count =
21 | quiet-level = 3
22 | ignore-words-list = formating,sur,hist,dota,ba
23 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/color.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/gray.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/gray.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_instanceIds.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_instanceIds.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_labelIds.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_labelIds.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_labelTrainIds.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/gtFine/frankfurt_000000_000294_gtFine_labelTrainIds.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/leftImg8bit/frankfurt_000000_000294_leftImg8bit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_cityscapes_dataset/leftImg8bit/frankfurt_000000_000294_leftImg8bit.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/gts/00000_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/gts/00000_gt.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/gts/00001_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/gts/00001_gt.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/gts/00002_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/gts/00002_gt.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/gts/00003_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/gts/00003_gt.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/gts/00004_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/gts/00004_gt.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00000_img.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00000_img.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00001_img.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00001_img.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00002_img.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00002_img.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00003_img.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00003_img.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00004_img.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_dataset/imgs/00004_img.jpg
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/splits/train.txt:
--------------------------------------------------------------------------------
1 | 00000
2 | 00001
3 | 00002
4 | 00003
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_dataset/splits/val.txt:
--------------------------------------------------------------------------------
1 | 00004
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1024_1920_instance_color_RGB.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1024_1920_instance_color_RGB.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1536_2432_instance_color_RGB.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_isaid_dataset/ann_dir/P0000_0_896_1536_2432_instance_color_RGB.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1024_1920.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1024_1920.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1536_2432.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_isaid_dataset/img_dir/P0000_0_896_1536_2432.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/splits/train.txt:
--------------------------------------------------------------------------------
1 | P0000_0_896_1536_2432
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_isaid_dataset/splits/val.txt:
--------------------------------------------------------------------------------
1 | P0000_0_896_1024_1920
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/0.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/1.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/ann_dir/2.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/0.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/1.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_loveda_dataset/img_dir/2.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_potsdam_dataset/ann_dir/2_10_0_0_512_512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_potsdam_dataset/ann_dir/2_10_0_0_512_512.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_potsdam_dataset/img_dir/2_10_0_0_512_512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_potsdam_dataset/img_dir/2_10_0_0_512_512.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_vaihingen_dataset/ann_dir/area1_0_0_512_512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_vaihingen_dataset/ann_dir/area1_0_0_512_512.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/pseudo_vaihingen_dataset/img_dir/area1_0_0_512_512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/pseudo_vaihingen_dataset/img_dir/area1_0_0_512_512.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/data/seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggjy/FastMIM.pytorch/d4eea0dc8caf3db4be3d3ed4c7a8bc07c59b6111/mmseg-v0.28/tests/data/seg.png
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_digit_version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmseg import digit_version
3 |
4 |
5 | def test_digit_version():
6 | assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0)
7 | assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0)
8 | assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0)
9 | assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1)
10 | assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0)
11 | assert digit_version('1.0') == digit_version('1.0.0')
12 | assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5')
13 | assert digit_version('1.0.0dev') < digit_version('1.0.0a')
14 | assert digit_version('1.0.0a') < digit_version('1.0.0a1')
15 | assert digit_version('1.0.0a') < digit_version('1.0.0b')
16 | assert digit_version('1.0.0b') < digit_version('1.0.0rc')
17 | assert digit_version('1.0.0rc1') < digit_version('1.0.0')
18 | assert digit_version('1.0.0') < digit_version('1.0.0post')
19 | assert digit_version('1.0.0post') < digit_version('1.0.0post1')
20 | assert digit_version('v1') == (1, 0, 0, 0, 0, 0)
21 | assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)
22 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_inference.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os.path as osp
3 |
4 | import mmcv
5 |
6 | from mmseg.apis import inference_segmentor, init_segmentor
7 |
8 |
9 | def test_test_time_augmentation_on_cpu():
10 | config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
11 | config = mmcv.Config.fromfile(config_file)
12 |
13 | # Remove pretrain model download for testing
14 | config.model.pretrained = None
15 | # Replace SyncBN with BN to inference on CPU
16 | norm_cfg = dict(type='BN', requires_grad=True)
17 | config.model.backbone.norm_cfg = norm_cfg
18 | config.model.decode_head.norm_cfg = norm_cfg
19 | config.model.auxiliary_head.norm_cfg = norm_cfg
20 |
21 | # Enable test time augmentation
22 | config.data.test.pipeline[1].flip = True
23 |
24 | checkpoint_file = None
25 | model = init_segmentor(config, checkpoint_file, device='cpu')
26 |
27 | img = mmcv.imread(
28 | osp.join(osp.dirname(__file__), 'data/color.jpg'), 'color')
29 | result = inference_segmentor(model, img)
30 | assert result[0].shape == (288, 512)
31 |
--------------------------------------------------------------------------------
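The test above exercises the standard two-call mmseg inference API: init_segmentor builds a model from a config (optionally loading a checkpoint), and inference_segmentor runs it on an image. Below is a minimal sketch of the same flow outside the test harness; the checkpoint and image paths are placeholders, not files guaranteed to exist in this repository.

import mmcv
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot

# The config ships with mmseg; the checkpoint path is a placeholder.
config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'pspnet_r50-d8_512x1024_40k_cityscapes.pth'

# Build the segmentor and load weights; use device='cuda:0' when a GPU is available.
model = init_segmentor(config_file, checkpoint_file, device='cpu')

img = mmcv.imread('demo.png')             # placeholder input image
result = inference_segmentor(model, img)  # list with one HxW label map per image
show_result_pyplot(model, img, result)    # overlay the prediction for a quick visual check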
/mmseg-v0.28/tests/test_models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_backbones/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .utils import all_zeros, check_norm_state, is_block, is_norm
3 |
4 | __all__ = ['is_norm', 'is_block', 'all_zeros', 'check_norm_state']
5 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_backbones/test_fast_scnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmseg.models.backbones import FastSCNN
6 |
7 |
8 | def test_fastscnn_backbone():
9 | with pytest.raises(AssertionError):
10 | # Fast-SCNN channel constraints.
11 | FastSCNN(
12 | 3, (32, 48),
13 | 64, (64, 96, 128), (2, 2, 1),
14 | global_out_channels=127,
15 | higher_in_channels=64,
16 | lower_in_channels=128)
17 |
18 | # Test FastSCNN Standard Forward
19 | model = FastSCNN(
20 | in_channels=3,
21 | downsample_dw_channels=(4, 6),
22 | global_in_channels=8,
23 | global_block_channels=(8, 12, 16),
24 | global_block_strides=(2, 2, 1),
25 | global_out_channels=16,
26 | higher_in_channels=8,
27 | lower_in_channels=16,
28 | fusion_out_channels=16,
29 | )
30 | model.init_weights()
31 | model.train()
32 | batch_size = 4
33 | imgs = torch.randn(batch_size, 3, 64, 128)
34 | feat = model(imgs)
35 |
36 | assert len(feat) == 3
37 | # higher-res
38 | assert feat[0].shape == torch.Size([batch_size, 8, 8, 16])
39 | # lower-res
40 | assert feat[1].shape == torch.Size([batch_size, 16, 2, 4])
41 | # FFM output
42 | assert feat[2].shape == torch.Size([batch_size, 16, 8, 16])
43 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_ann_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import ANNHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_ann_head():
9 |
10 | inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)]
11 | head = ANNHead(
12 | in_channels=[4, 8],
13 | channels=2,
14 | num_classes=19,
15 | in_index=[-2, -1],
16 | project_channels=8)
17 | if torch.cuda.is_available():
18 | head, inputs = to_cuda(head, inputs)
19 | outputs = head(inputs)
20 | assert outputs.shape == (1, head.num_classes, 21, 21)
21 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_cc_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmseg.models.decode_heads import CCHead
6 | from .utils import to_cuda
7 |
8 |
9 | def test_cc_head():
10 | head = CCHead(in_channels=16, channels=8, num_classes=19)
11 | assert len(head.convs) == 2
12 | assert hasattr(head, 'cca')
13 | if not torch.cuda.is_available():
14 | pytest.skip('CCHead requires CUDA')
15 | inputs = [torch.randn(1, 16, 23, 23)]
16 | head, inputs = to_cuda(head, inputs)
17 | outputs = head(inputs)
18 | assert outputs.shape == (1, head.num_classes, 23, 23)
19 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_da_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import DAHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_da_head():
9 |
10 | inputs = [torch.randn(1, 16, 23, 23)]
11 | head = DAHead(in_channels=16, channels=8, num_classes=19, pam_channels=8)
12 | if torch.cuda.is_available():
13 | head, inputs = to_cuda(head, inputs)
14 | outputs = head(inputs)
15 | assert isinstance(outputs, tuple) and len(outputs) == 3
16 | for output in outputs:
17 | assert output.shape == (1, head.num_classes, 23, 23)
18 | test_output = head.forward_test(inputs, None, None)
19 | assert test_output.shape == (1, head.num_classes, 23, 23)
20 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_ema_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import EMAHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_emanet_head():
9 | head = EMAHead(
10 | in_channels=4,
11 | ema_channels=3,
12 | channels=2,
13 | num_stages=3,
14 | num_bases=2,
15 | num_classes=19)
16 | for param in head.ema_mid_conv.parameters():
17 | assert not param.requires_grad
18 | assert hasattr(head, 'ema_module')
19 | inputs = [torch.randn(1, 4, 23, 23)]
20 | if torch.cuda.is_available():
21 | head, inputs = to_cuda(head, inputs)
22 | outputs = head(inputs)
23 | assert outputs.shape == (1, head.num_classes, 23, 23)
24 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_gc_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import GCHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_gc_head():
9 | head = GCHead(in_channels=4, channels=4, num_classes=19)
10 | assert len(head.convs) == 2
11 | assert hasattr(head, 'gc_block')
12 | inputs = [torch.randn(1, 4, 23, 23)]
13 | if torch.cuda.is_available():
14 | head, inputs = to_cuda(head, inputs)
15 | outputs = head(inputs)
16 | assert outputs.shape == (1, head.num_classes, 23, 23)
17 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_isa_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import ISAHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_isa_head():
9 |
10 | inputs = [torch.randn(1, 8, 23, 23)]
11 | isa_head = ISAHead(
12 | in_channels=8,
13 | channels=4,
14 | num_classes=19,
15 | isa_channels=4,
16 | down_factor=(8, 8))
17 | if torch.cuda.is_available():
18 | isa_head, inputs = to_cuda(isa_head, inputs)
19 | output = isa_head(inputs)
20 | assert output.shape == (1, isa_head.num_classes, 23, 23)
21 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_nl_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import NLHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_nl_head():
9 | head = NLHead(in_channels=8, channels=4, num_classes=19)
10 | assert len(head.convs) == 2
11 | assert hasattr(head, 'nl_block')
12 | inputs = [torch.randn(1, 8, 23, 23)]
13 | if torch.cuda.is_available():
14 | head, inputs = to_cuda(head, inputs)
15 | outputs = head(inputs)
16 | assert outputs.shape == (1, head.num_classes, 23, 23)
17 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_ocr_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import FCNHead, OCRHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_ocr_head():
9 |
10 | inputs = [torch.randn(1, 8, 23, 23)]
11 | ocr_head = OCRHead(
12 | in_channels=8, channels=4, num_classes=19, ocr_channels=8)
13 | fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19)
14 | if torch.cuda.is_available():
15 | ocr_head, inputs = to_cuda(ocr_head, inputs)
16 | fcn_head, inputs = to_cuda(fcn_head, inputs)
17 | prev_output = fcn_head(inputs)
18 | output = ocr_head(inputs, prev_output)
19 | assert output.shape == (1, ocr_head.num_classes, 23, 23)
20 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_psp_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmseg.models.decode_heads import PSPHead
6 | from .utils import _conv_has_norm, to_cuda
7 |
8 |
9 | def test_psp_head():
10 |
11 | with pytest.raises(AssertionError):
12 | # pool_scales must be list|tuple
13 | PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1)
14 |
15 | # test no norm_cfg
16 | head = PSPHead(in_channels=4, channels=2, num_classes=19)
17 | assert not _conv_has_norm(head, sync_bn=False)
18 |
19 | # test with norm_cfg
20 | head = PSPHead(
21 | in_channels=4,
22 | channels=2,
23 | num_classes=19,
24 | norm_cfg=dict(type='SyncBN'))
25 | assert _conv_has_norm(head, sync_bn=True)
26 |
27 | inputs = [torch.randn(1, 4, 23, 23)]
28 | head = PSPHead(
29 | in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3))
30 | if torch.cuda.is_available():
31 | head, inputs = to_cuda(head, inputs)
32 | assert head.psp_modules[0][0].output_size == 1
33 | assert head.psp_modules[1][0].output_size == 2
34 | assert head.psp_modules[2][0].output_size == 3
35 | outputs = head(inputs)
36 | assert outputs.shape == (1, head.num_classes, 23, 23)
37 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_segformer_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmseg.models.decode_heads import SegformerHead
6 |
7 |
8 | def test_segformer_head():
9 | with pytest.raises(AssertionError):
10 | # `in_channels` must have same length as `in_index`
11 | SegformerHead(
12 | in_channels=(1, 2, 3), in_index=(0, 1), channels=5, num_classes=2)
13 |
14 | H, W = (64, 64)
15 | in_channels = (32, 64, 160, 256)
16 | shapes = [(H // 2**(i + 2), W // 2**(i + 2))
17 | for i in range(len(in_channels))]
18 | model = SegformerHead(
19 | in_channels=in_channels,
20 | in_index=[0, 1, 2, 3],
21 | channels=256,
22 | num_classes=19)
23 |
24 | with pytest.raises(IndexError):
25 | # in_index must match the input feature maps.
26 | inputs = [
27 | torch.randn((1, in_channel, *shape))
28 | for in_channel, shape in zip(in_channels, shapes)
29 | ][:3]
30 | temp = model(inputs)
31 |
32 | # Normal Input
33 | # ((1, 32, 16, 16), (1, 64, 8, 8), (1, 160, 4, 4), (1, 256, 2, 2)
34 | inputs = [
35 | torch.randn((1, in_channel, *shape))
36 | for in_channel, shape in zip(in_channels, shapes)
37 | ]
38 | temp = model(inputs)
39 |
40 | assert temp.shape == (1, 19, H // 4, W // 4)
41 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_segmenter_mask_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import SegmenterMaskTransformerHead
5 | from .utils import _conv_has_norm, to_cuda
6 |
7 |
8 | def test_segmenter_mask_transformer_head():
9 | head = SegmenterMaskTransformerHead(
10 | in_channels=2,
11 | channels=2,
12 | num_classes=150,
13 | num_layers=2,
14 | num_heads=3,
15 | embed_dims=192,
16 | dropout_ratio=0.0)
17 | assert _conv_has_norm(head, sync_bn=True)
18 | head.init_weights()
19 |
20 | inputs = [torch.randn(1, 2, 32, 32)]
21 | if torch.cuda.is_available():
22 | head, inputs = to_cuda(head, inputs)
23 | outputs = head(inputs)
24 | assert outputs.shape == (1, head.num_classes, 32, 32)
25 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_stdc_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models.decode_heads import STDCHead
5 | from .utils import to_cuda
6 |
7 |
8 | def test_stdc_head():
9 | inputs = [torch.randn(1, 32, 21, 21)]
10 | head = STDCHead(
11 | in_channels=32,
12 | channels=8,
13 | num_convs=1,
14 | num_classes=2,
15 | in_index=-1,
16 | loss_decode=[
17 | dict(
18 | type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0),
19 | dict(type='DiceLoss', loss_name='loss_dice', loss_weight=1.0)
20 | ])
21 | if torch.cuda.is_available():
22 | head, inputs = to_cuda(head, inputs)
23 | outputs = head(inputs)
24 | assert isinstance(outputs, torch.Tensor) and len(outputs) == 1
25 | assert outputs.shape == torch.Size([1, head.num_classes, 21, 21])
26 |
27 | fake_label = torch.ones_like(
28 | outputs[:, 0:1, :, :], dtype=torch.int16).long()
29 | loss = head.losses(seg_logit=outputs, seg_label=fake_label)
30 | assert loss['loss_ce'] != torch.zeros_like(loss['loss_ce'])
31 | assert loss['loss_dice'] != torch.zeros_like(loss['loss_dice'])
32 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/test_uper_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import pytest
3 | import torch
4 |
5 | from mmseg.models.decode_heads import UPerHead
6 | from .utils import _conv_has_norm, to_cuda
7 |
8 |
9 | def test_uper_head():
10 |
11 | with pytest.raises(AssertionError):
12 | # fpn_in_channels must be list|tuple
13 | UPerHead(in_channels=4, channels=2, num_classes=19)
14 |
15 | # test no norm_cfg
16 | head = UPerHead(
17 | in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
18 | assert not _conv_has_norm(head, sync_bn=False)
19 |
20 | # test with norm_cfg
21 | head = UPerHead(
22 | in_channels=[4, 2],
23 | channels=2,
24 | num_classes=19,
25 | norm_cfg=dict(type='SyncBN'),
26 | in_index=[-2, -1])
27 | assert _conv_has_norm(head, sync_bn=True)
28 |
29 | inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 2, 21, 21)]
30 | head = UPerHead(
31 | in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
32 | if torch.cuda.is_available():
33 | head, inputs = to_cuda(head, inputs)
34 | outputs = head(inputs)
35 | assert outputs.shape == (1, head.num_classes, 45, 45)
36 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_heads/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmcv.cnn import ConvModule
3 | from mmcv.utils.parrots_wrapper import SyncBatchNorm
4 |
5 |
6 | def _conv_has_norm(module, sync_bn):
7 | for m in module.modules():
8 | if isinstance(m, ConvModule):
9 | if not m.with_norm:
10 | return False
11 | if sync_bn:
12 | if not isinstance(m.bn, SyncBatchNorm):
13 | return False
14 | return True
15 |
16 |
17 | def to_cuda(module, data):
18 | module = module.cuda()
19 | if isinstance(data, list):
20 | for i in range(len(data)):
21 | data[i] = data[i].cuda()
22 | return module, data
23 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_losses/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_necks/test_fpn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models import FPN
5 |
6 |
7 | def test_fpn():
8 | in_channels = [64, 128, 256, 512]
9 | inputs = [
10 | torch.randn(1, c, 56 // 2**i, 56 // 2**i)
11 | for i, c in enumerate(in_channels)
12 | ]
13 |
14 | fpn = FPN(in_channels, 64, len(in_channels))
15 | outputs = fpn(inputs)
16 | assert outputs[0].shape == torch.Size([1, 64, 56, 56])
17 | assert outputs[1].shape == torch.Size([1, 64, 28, 28])
18 | assert outputs[2].shape == torch.Size([1, 64, 14, 14])
19 | assert outputs[3].shape == torch.Size([1, 64, 7, 7])
20 |
21 | fpn = FPN(
22 | in_channels,
23 | 64,
24 | len(in_channels),
25 | upsample_cfg=dict(mode='nearest', scale_factor=2.0))
26 | outputs = fpn(inputs)
27 | assert outputs[0].shape == torch.Size([1, 64, 56, 56])
28 | assert outputs[1].shape == torch.Size([1, 64, 28, 28])
29 | assert outputs[2].shape == torch.Size([1, 64, 14, 14])
30 | assert outputs[3].shape == torch.Size([1, 64, 7, 7])
31 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_necks/test_mla_neck.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models import MLANeck
5 |
6 |
7 | def test_mla():
8 | in_channels = [4, 4, 4, 4]
9 | mla = MLANeck(in_channels, 32)
10 |
11 | inputs = [torch.randn(1, c, 12, 12) for i, c in enumerate(in_channels)]
12 | outputs = mla(inputs)
13 | assert outputs[0].shape == torch.Size([1, 32, 12, 12])
14 | assert outputs[1].shape == torch.Size([1, 32, 12, 12])
15 | assert outputs[2].shape == torch.Size([1, 32, 12, 12])
16 | assert outputs[3].shape == torch.Size([1, 32, 12, 12])
17 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_necks/test_multilevel_neck.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 |
4 | from mmseg.models import MultiLevelNeck
5 |
6 |
7 | def test_multilevel_neck():
8 |
9 | # Test init_weights
10 | MultiLevelNeck([266], 32).init_weights()
11 |
12 | # Test multi feature maps
13 | in_channels = [32, 64, 128, 256]
14 | inputs = [torch.randn(1, c, 14, 14) for i, c in enumerate(in_channels)]
15 |
16 | neck = MultiLevelNeck(in_channels, 32)
17 | outputs = neck(inputs)
18 | assert outputs[0].shape == torch.Size([1, 32, 7, 7])
19 | assert outputs[1].shape == torch.Size([1, 32, 14, 14])
20 | assert outputs[2].shape == torch.Size([1, 32, 28, 28])
21 | assert outputs[3].shape == torch.Size([1, 32, 56, 56])
22 |
23 | # Test one feature map
24 | in_channels = [768]
25 | inputs = [torch.randn(1, 768, 14, 14)]
26 |
27 | neck = MultiLevelNeck(in_channels, 32)
28 | outputs = neck(inputs)
29 | assert outputs[0].shape == torch.Size([1, 32, 7, 7])
30 | assert outputs[1].shape == torch.Size([1, 32, 14, 14])
31 | assert outputs[2].shape == torch.Size([1, 32, 28, 28])
32 | assert outputs[3].shape == torch.Size([1, 32, 56, 56])
33 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_segmentors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tests/test_models/test_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tools/dist_test.sh:
--------------------------------------------------------------------------------
1 | CONFIG=$1
2 | CHECKPOINT=$2
3 | GPUS=$3
4 | NNODES=${NNODES:-1}
5 | NODE_RANK=${NODE_RANK:-0}
6 | PORT=${PORT:-29500}
7 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
8 |
9 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
10 | python -m torch.distributed.launch \
11 | --nnodes=$NNODES \
12 | --node_rank=$NODE_RANK \
13 | --master_addr=$MASTER_ADDR \
14 | --nproc_per_node=$GPUS \
15 | --master_port=$PORT \
16 | $(dirname "$0")/test.py \
17 | $CONFIG \
18 | $CHECKPOINT \
19 | --launcher pytorch \
20 | ${@:4}
21 |
--------------------------------------------------------------------------------
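The wrapper passes CONFIG, CHECKPOINT and GPUS through to tools/test.py, forwarding everything after the third argument via ${@:4}. A typical single-node evaluation (config and checkpoint paths are placeholders) would look like: PORT=29501 bash tools/dist_test.sh <config.py> <checkpoint.pth> 8 --eval mIoU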
/mmseg-v0.28/tools/dist_train.sh:
--------------------------------------------------------------------------------
1 | CONFIG=$1
2 | GPUS=$2
3 | NNODES=${NNODES:-1}
4 | NODE_RANK=${NODE_RANK:-0}
5 | PORT=${PORT:-29500}
6 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
7 |
8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
9 | python -m torch.distributed.launch \
10 | --nnodes=$NNODES \
11 | --node_rank=$NODE_RANK \
12 | --master_addr=$MASTER_ADDR \
13 | --nproc_per_node=$GPUS \
14 | --master_port=$PORT \
15 | $(dirname "$0")/train.py \
16 | $CONFIG \
17 | --launcher pytorch ${@:3}
18 |
--------------------------------------------------------------------------------
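The training wrapper follows the same pattern: the config and GPU count are positional, NNODES/NODE_RANK/PORT/MASTER_ADDR come from the environment, and any trailing flags (e.g. --work-dir) are forwarded to tools/train.py. A single-node 8-GPU run is roughly (paths are placeholders): PORT=29501 bash tools/dist_train.sh <config.py> 8 --work-dir work_dirs/my_exp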
/mmseg-v0.28/tools/publish_model.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import argparse
3 | import subprocess
4 |
5 | import torch
6 |
7 |
8 | def parse_args():
9 | parser = argparse.ArgumentParser(
10 | description='Process a checkpoint to be published')
11 | parser.add_argument('in_file', help='input checkpoint filename')
12 | parser.add_argument('out_file', help='output checkpoint filename')
13 | args = parser.parse_args()
14 | return args
15 |
16 |
17 | def process_checkpoint(in_file, out_file):
18 | checkpoint = torch.load(in_file, map_location='cpu')
19 | # remove optimizer for smaller file size
20 | if 'optimizer' in checkpoint:
21 | del checkpoint['optimizer']
22 | # if it is necessary to remove some sensitive data in checkpoint['meta'],
23 | # add the code here.
24 | torch.save(checkpoint, out_file)
25 | sha = subprocess.check_output(['sha256sum', out_file]).decode()
26 | final_file = (out_file[:-4] if out_file.endswith('.pth') else out_file) + '-{}.pth'.format(sha[:8])
27 | subprocess.Popen(['mv', out_file, final_file])
28 |
29 |
30 | def main():
31 | args = parse_args()
32 | process_checkpoint(args.in_file, args.out_file)
33 |
34 |
35 | if __name__ == '__main__':
36 | main()
37 |
--------------------------------------------------------------------------------
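The tool drops the optimizer state and renames the output with the first 8 hex characters of its SHA-256 digest, so a release checkpoint is produced with (placeholder paths): python tools/publish_model.py work_dirs/my_exp/latest.pth fastmim_release.pth, which writes fastmim_release-<sha8>.pth.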
/mmseg-v0.28/tools/slurm_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | CHECKPOINT=$4
9 | GPUS=${GPUS:-4}
10 | GPUS_PER_NODE=${GPUS_PER_NODE:-4}
11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
12 | PY_ARGS=${@:5}
13 | SRUN_ARGS=${SRUN_ARGS:-""}
14 |
15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
16 | srun -p ${PARTITION} \
17 | --job-name=${JOB_NAME} \
18 | --gres=gpu:${GPUS_PER_NODE} \
19 | --ntasks=${GPUS} \
20 | --ntasks-per-node=${GPUS_PER_NODE} \
21 | --cpus-per-task=${CPUS_PER_TASK} \
22 | --kill-on-bad-exit=1 \
23 | ${SRUN_ARGS} \
24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
25 |
--------------------------------------------------------------------------------
/mmseg-v0.28/tools/slurm_train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 |
5 | PARTITION=$1
6 | JOB_NAME=$2
7 | CONFIG=$3
8 | GPUS=${GPUS:-4}
9 | GPUS_PER_NODE=${GPUS_PER_NODE:-4}
10 | CPUS_PER_TASK=${CPUS_PER_TASK:-5}
11 | SRUN_ARGS=${SRUN_ARGS:-""}
12 | PY_ARGS=${@:4}
13 |
14 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
15 | srun -p ${PARTITION} \
16 | --job-name=${JOB_NAME} \
17 | --gres=gpu:${GPUS_PER_NODE} \
18 | --ntasks=${GPUS} \
19 | --ntasks-per-node=${GPUS_PER_NODE} \
20 | --cpus-per-task=${CPUS_PER_TASK} \
21 | --kill-on-bad-exit=1 \
22 | ${SRUN_ARGS} \
23 | python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS}
24 |
--------------------------------------------------------------------------------
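Both Slurm wrappers take the partition, job name and config (plus a checkpoint for testing) positionally and read GPUS, GPUS_PER_NODE, CPUS_PER_TASK and SRUN_ARGS from the environment. A two-node training run would therefore be launched roughly as (all values are placeholders): GPUS=16 GPUS_PER_NODE=8 bash tools/slurm_train.sh <partition> <job_name> <config.py>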
/requirement_pip_install.sh:
--------------------------------------------------------------------------------
1 | pip install --upgrade pip;
2 | pip install torch==1.7.0 torchvision==0.8.1;
3 | pip install timm==0.3.2;
4 | pip install cupy-cuda101;
5 | pip install torchprofile;
6 | pip install einops;
7 | pip install tensorboardX;
8 | pip install yapf;
9 | pip install cython;
10 | pip install opencv-python;
11 | pip install pytest-runner;
12 | pip install terminaltables;
13 | pip install mmpycocotools;
14 | pip install packaging==21.0;
15 | pip install mmcls==0.21.0;
16 | pip install mmcv-full==1.4.4;
--------------------------------------------------------------------------------
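These pins describe the environment the code was developed against: PyTorch 1.7.0 with the matching torchvision 0.8.1, a CuPy wheel built for CUDA 10.1 (cupy-cuda101), mmcv-full 1.4.4 and timm 0.3.2; on a different CUDA toolkit the cupy wheel name (and possibly the torch build) would need to change accordingly.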
/util/lr_sched.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # All rights reserved.
3 |
4 | # This source code is licensed under the license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | # Modified by Jianyuan Guo (jyguo@pku.edu.cn)
7 |
8 | import math
9 |
10 | def adjust_learning_rate(optimizer, epoch, args):
11 | """Decay the learning rate with half-cycle cosine after warmup"""
12 | if epoch < args.warmup_epochs:
13 | lr = args.lr * epoch / args.warmup_epochs
14 | else:
15 | lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
16 | (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
17 | for param_group in optimizer.param_groups:
18 | if "lr_scale" in param_group:
19 | param_group["lr"] = lr * param_group["lr_scale"]
20 | else:
21 | param_group["lr"] = lr
22 | return lr
23 |
24 | def adjust_learning_rate_step(optimizer, epoch, args):
25 | if epoch < args.warmup_epochs:
26 | lr = args.lr * epoch / args.warmup_epochs
27 | else:
28 | lr = args.lr * (0.1 ** (epoch // args.decay_epoch))
29 | for param_group in optimizer.param_groups:
30 | if "lr_scale" in param_group:
31 | param_group["lr"] = lr * param_group["lr_scale"]
32 | else:
33 | param_group["lr"] = lr
34 | return lr
35 |
--------------------------------------------------------------------------------
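adjust_learning_rate implements linear warmup for the first warmup_epochs followed by a half-cycle cosine decay from args.lr down to args.min_lr, scaling the result per parameter group whenever the group defines lr_scale (as layer-wise decay setups do). Below is a minimal sketch of the usual per-iteration call pattern; treating the epoch argument as a fraction (MAE-style) is an assumption about how the training engines drive it, and the model, loader and args here are placeholders rather than objects defined in this repository.

# Sketch: driving the warmup + cosine schedule once per optimizer step.
import argparse
import torch
from util import lr_sched

args = argparse.Namespace(lr=1.5e-4, min_lr=0.0, warmup_epochs=10, epochs=100)
model = torch.nn.Linear(8, 8)                          # stand-in model
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
loader = [torch.randn(4, 8) for _ in range(5)]         # stand-in "data loader"

for epoch in range(args.epochs):
    for it, batch in enumerate(loader):
        # Fractional epoch => the schedule is updated at every iteration,
        # not only at epoch boundaries.
        lr = lr_sched.adjust_learning_rate(optimizer, it / len(loader) + epoch, args)
        loss = model(batch).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()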