├── PLUG-Det ├── requirements │ ├── mminstall.txt │ ├── readthedocs.txt │ ├── albu.txt │ ├── runtime.txt │ ├── optional.txt │ ├── build.txt │ ├── docs.txt │ └── tests.txt ├── requirements.txt ├── mmdet │ ├── models │ │ ├── necks │ │ │ └── __init__.py │ │ ├── backbones │ │ │ ├── __pycache__ │ │ │ │ ├── hrnet.cpython-37.pyc │ │ │ │ ├── pvt.cpython-37.pyc │ │ │ │ ├── swin.cpython-37.pyc │ │ │ │ ├── vgg.cpython-37.pyc │ │ │ │ ├── DFFTNet.cpython-37.pyc │ │ │ │ ├── darknet.cpython-37.pyc │ │ │ │ ├── regnet.cpython-37.pyc │ │ │ │ ├── res2net.cpython-37.pyc │ │ │ │ ├── resnest.cpython-37.pyc │ │ │ │ ├── resnet.cpython-37.pyc │ │ │ │ ├── resnext.cpython-37.pyc │ │ │ │ ├── ssd_vgg.cpython-37.pyc │ │ │ │ ├── tae_ca.cpython-37.pyc │ │ │ │ ├── CA_layer.cpython-37.pyc │ │ │ │ ├── DOT_blocks.cpython-37.pyc │ │ │ │ ├── SA_layer.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── hourglass.cpython-37.pyc │ │ │ │ ├── csp_darknet.cpython-37.pyc │ │ │ │ ├── mobilenet_v2.cpython-37.pyc │ │ │ │ ├── trident_resnet.cpython-37.pyc │ │ │ │ ├── detectors_resnet.cpython-37.pyc │ │ │ │ └── detectors_resnext.cpython-37.pyc │ │ │ ├── __init__.py │ │ │ └── vgg.py │ │ ├── roi_heads │ │ │ ├── mask_heads │ │ │ │ └── __init__.py │ │ │ ├── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ └── base_roi_extractor.py │ │ │ ├── bbox_heads │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── base_roi_head.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ └── brick_wrappers.py │ │ ├── detectors │ │ │ ├── __init__.py │ │ │ ├── mask_rcnn.py │ │ │ ├── faster_rcnn.py │ │ │ ├── weak_rcnn.py │ │ │ └── PLUG.py │ │ ├── losses │ │ │ ├── expand_onehot_labels.py │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── utils.py │ │ │ └── gaussian_focal_loss.py │ │ ├── __init__.py │ │ ├── dense_heads │ │ │ └── __init__.py │ │ └── builder.py │ ├── core │ │ ├── hook │ │ │ ├── __pycache__ │ │ │ │ ├── ema.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── checkloss_hook.cpython-37.pyc │ 
│ │ │ ├── sync_norm_hook.cpython-37.pyc │ │ │ │ ├── set_epoch_info_hook.cpython-37.pyc │ │ │ │ ├── set_iter_info_hook.cpython-37.pyc │ │ │ │ ├── sync_random_size_hook.cpython-37.pyc │ │ │ │ ├── yolox_lrupdater_hook.cpython-37.pyc │ │ │ │ └── yolox_mode_switch_hook.cpython-37.pyc │ │ │ ├── set_epoch_info_hook.py │ │ │ ├── set_iter_info_hook.py │ │ │ ├── __init__.py │ │ │ ├── checkloss_hook.py │ │ │ ├── sync_norm_hook.py │ │ │ ├── yolox_mode_switch_hook.py │ │ │ ├── yolox_lrupdater_hook.py │ │ │ └── sync_random_size_hook.py │ │ ├── anchor │ │ │ ├── __pycache__ │ │ │ │ ├── utils.cpython-37.pyc │ │ │ │ ├── builder.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── point_generator.cpython-37.pyc │ │ │ │ └── anchor_generator.cpython-37.pyc │ │ │ ├── builder.py │ │ │ ├── __init__.py │ │ │ └── utils.py │ │ ├── bbox │ │ │ ├── __pycache__ │ │ │ │ ├── builder.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── demodata.cpython-37.pyc │ │ │ │ └── transforms.cpython-37.pyc │ │ │ ├── coder │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── base_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── tblr_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── yolo_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── pseudo_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── bucketing_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── delta_xywh_bbox_coder.cpython-37.pyc │ │ │ │ │ ├── distance_point_bbox_coder.cpython-37.pyc │ │ │ │ │ └── legacy_delta_xywh_bbox_coder.cpython-37.pyc │ │ │ │ ├── __init__.py │ │ │ │ └── base_bbox_coder.py │ │ │ ├── samplers │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── base_sampler.cpython-37.pyc │ │ │ │ │ ├── ohem_sampler.cpython-37.pyc │ │ │ │ │ ├── pseudo_sampler.cpython-37.pyc │ │ │ │ │ ├── random_sampler.cpython-37.pyc │ │ │ │ │ ├── sampling_result.cpython-37.pyc │ │ │ │ │ ├── combined_sampler.cpython-37.pyc │ │ │ │ │ ├── score_hlr_sampler.cpython-37.pyc │ │ │ │ │ ├── mask_pseudo_sampler.cpython-37.pyc │ │ │ │ │ ├── 
mask_sampling_result.cpython-37.pyc │ │ │ │ │ ├── iou_balanced_neg_sampler.cpython-37.pyc │ │ │ │ │ └── instance_balanced_pos_sampler.cpython-37.pyc │ │ │ │ ├── __init__.py │ │ │ │ └── random_sampler.py │ │ │ ├── assigners │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── assign_result.cpython-37.pyc │ │ │ │ │ ├── atss_assigner.cpython-37.pyc │ │ │ │ │ ├── base_assigner.cpython-37.pyc │ │ │ │ │ ├── grid_assigner.cpython-37.pyc │ │ │ │ │ ├── point_assigner.cpython-37.pyc │ │ │ │ │ ├── max_iou_assigner.cpython-37.pyc │ │ │ │ │ ├── region_assigner.cpython-37.pyc │ │ │ │ │ ├── sim_ota_assigner.cpython-37.pyc │ │ │ │ │ ├── uniform_assigner.cpython-37.pyc │ │ │ │ │ ├── hungarian_assigner.cpython-37.pyc │ │ │ │ │ ├── center_region_assigner.cpython-37.pyc │ │ │ │ │ ├── task_aligned_assigner.cpython-37.pyc │ │ │ │ │ ├── approx_max_iou_assigner.cpython-37.pyc │ │ │ │ │ └── mask_hungarian_assigner.cpython-37.pyc │ │ │ │ ├── __init__.py │ │ │ │ └── base_assigner.py │ │ │ ├── iou_calculators │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── builder.cpython-37.pyc │ │ │ │ │ └── iou2d_calculator.cpython-37.pyc │ │ │ │ ├── __init__.py │ │ │ │ └── builder.py │ │ │ ├── builder.py │ │ │ ├── __init__.py │ │ │ └── demodata.py │ │ ├── mask │ │ │ ├── __pycache__ │ │ │ │ ├── utils.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── mask_target.cpython-37.pyc │ │ │ │ └── structures.cpython-37.pyc │ │ │ ├── __init__.py │ │ │ └── utils.py │ │ ├── utils │ │ │ ├── __pycache__ │ │ │ │ ├── misc.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ └── dist_utils.cpython-37.pyc │ │ │ └── __init__.py │ │ ├── evaluation │ │ │ ├── __pycache__ │ │ │ │ ├── recall.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── mean_ap.cpython-37.pyc │ │ │ │ ├── class_names.cpython-37.pyc │ │ │ │ ├── eval_hooks.cpython-37.pyc │ │ │ │ ├── bbox_overlaps.cpython-37.pyc │ │ │ │ └── panoptic_utils.cpython-37.pyc │ │ │ ├── panoptic_utils.py │ 
│ │ ├── __init__.py │ │ │ └── bbox_overlaps.py │ │ ├── visualization │ │ │ ├── __pycache__ │ │ │ │ ├── image.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ └── palette.cpython-37.pyc │ │ │ ├── __init__.py │ │ │ └── palette.py │ │ ├── data_structures │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── general_data.cpython-37.pyc │ │ │ │ └── instance_data.cpython-37.pyc │ │ │ └── __init__.py │ │ ├── post_processing │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── bbox_nms.cpython-37.pyc │ │ │ │ ├── matrix_nms.cpython-37.pyc │ │ │ │ └── merge_augs.cpython-37.pyc │ │ │ └── __init__.py │ │ └── __init__.py │ ├── datasets │ │ ├── api_wrappers │ │ │ ├── __init__.py │ │ │ └── coco_api.py │ │ ├── pipelines │ │ │ ├── formating.py │ │ │ ├── __init__.py │ │ │ └── compose.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ └── distributed_sampler.py │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ ├── logger.py │ │ ├── util_random.py │ │ ├── misc.py │ │ ├── profiling.py │ │ ├── optimizer.py │ │ └── setup_env.py │ ├── version.py │ ├── apis │ │ └── __init__.py │ └── __init__.py ├── MANIFEST.in ├── tools │ ├── dist_train.sh │ ├── dist_test.sh │ ├── tide.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── analysis_tools │ │ ├── json单类划分.py │ │ ├── get_flops.py │ │ └── eval_metric.py │ └── misc │ │ ├── print_config.py │ │ ├── download_dataset.py │ │ └── get_image_metas.py ├── CITATION.cff ├── configs │ ├── mask_rcnn │ │ └── mask_rcnn_r50_fpn_1x_dota_c.py │ ├── _base_ │ │ ├── schedules │ │ │ ├── schedule_1x.py │ │ │ ├── schedule_20e.py │ │ │ └── schedule_2x.py │ │ ├── default_runtime.py │ │ ├── datasets │ │ │ ├── dota_instance.py │ │ │ ├── dota_cp_detection.py │ │ │ └── dota_detection.py │ │ └── models │ │ │ └── faster_rcnn_r50_fpn.py │ └── faster_rcnn │ │ └── faster_rcnn_r50_fpn_1x_dota.py ├── pytest.ini ├── data_process │ ├── bbox2json.py │ ├── segm2json.py │ ├── calculate_mIoU.py │ ├── 
iSAID_generate_single_point_annotation.py │ ├── line_chart.py │ ├── split_DOTA_image_and_json.py │ ├── bar_chart.py │ └── DOTA_generate_single_point_annotation.py ├── setup.cfg ├── configs_wsod │ ├── oicr_vgg16.py │ ├── oicr_bbox_vgg16.py │ ├── wsod2_vgg16.py │ ├── wsddn_vgg16.py │ ├── base.py │ └── oicr_res50.py ├── DetVisGUI │ └── LICENSE └── model-index.yml └── README.md /PLUG-Det/requirements/mminstall.txt: -------------------------------------------------------------------------------- 1 | mmcv-full>=1.3.17 2 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv 2 | torch 3 | torchvision 4 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/albu.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 --no-binary qudida,albumentations 2 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | numpy 3 | pycocotools 4 | six 5 | terminaltables 6 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | cityscapesscripts 2 | imagecorruptions 3 | scipy 4 | sklearn 5 | timm 6 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building mmdetection 2 | cython 3 | numpy 4 | -------------------------------------------------------------------------------- /PLUG-Det/requirements.txt: 
-------------------------------------------------------------------------------- 1 | -r requirements/build.txt 2 | -r requirements/optional.txt 3 | -r requirements/runtime.txt 4 | -r requirements/tests.txt 5 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .fpn import FPN 3 | 4 | __all__ = [ 5 | 'FPN', 6 | ] 7 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/ema.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/ema.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/anchor/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/mask/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/mask/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/utils/__pycache__/misc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/utils/__pycache__/misc.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/anchor/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/__pycache__/demodata.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/__pycache__/demodata.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/mask/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/mask/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/anchor/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/__pycache__/transforms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/__pycache__/transforms.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/recall.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/recall.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/mask/__pycache__/mask_target.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/mask/__pycache__/mask_target.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/mask/__pycache__/structures.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/mask/__pycache__/structures.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/utils/__pycache__/dist_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/utils/__pycache__/dist_utils.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/hrnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/hrnet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/pvt.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/pvt.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/swin.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/swin.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/vgg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/vgg.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/mean_ap.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/mean_ap.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/checkloss_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/checkloss_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/sync_norm_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/sync_norm_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/visualization/__pycache__/image.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/visualization/__pycache__/image.cpython-37.pyc 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/DFFTNet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/DFFTNet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/darknet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/darknet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/regnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/regnet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/res2net.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/res2net.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/resnest.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/resnest.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/resnet.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/resnet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/resnext.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/resnext.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/ssd_vgg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/ssd_vgg.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/tae_ca.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/tae_ca.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__pycache__/point_generator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/anchor/__pycache__/point_generator.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/evaluation/__pycache__/class_names.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/class_names.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/visualization/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/visualization/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/visualization/__pycache__/palette.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/visualization/__pycache__/palette.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/CA_layer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/CA_layer.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/DOT_blocks.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/DOT_blocks.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/SA_layer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/SA_layer.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/hourglass.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/hourglass.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__pycache__/anchor_generator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/anchor/__pycache__/anchor_generator.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/data_structures/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/data_structures/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/set_epoch_info_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/set_epoch_info_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/set_iter_info_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/set_iter_info_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/post_processing/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/post_processing/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/csp_darknet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/csp_darknet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/mobilenet_v2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/mobilenet_v2.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/base_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/base_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/tblr_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/tblr_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/yolo_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/yolo_bbox_coder.cpython-37.pyc 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__pycache__/panoptic_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/evaluation/__pycache__/panoptic_utils.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/sync_random_size_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/sync_random_size_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/yolox_lrupdater_hook.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/yolox_lrupdater_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__pycache__/yolox_mode_switch_hook.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/hook/__pycache__/yolox_mode_switch_hook.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/post_processing/__pycache__/matrix_nms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/post_processing/__pycache__/matrix_nms.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/post_processing/__pycache__/merge_augs.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/post_processing/__pycache__/merge_augs.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/trident_resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/trident_resnet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/atss_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/atss_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/grid_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/grid_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/point_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/point_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/pseudo_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/pseudo_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/data_structures/__pycache__/general_data.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/data_structures/__pycache__/general_data.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/data_structures/__pycache__/instance_data.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/data_structures/__pycache__/instance_data.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .resnet import ResNet 3 | from .vgg import VGG16 4 | __all__ = [ 5 | 'ResNet','VGG16' 6 | ] 7 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/detectors_resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/detectors_resnet.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/backbones/__pycache__/detectors_resnext.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/models/backbones/__pycache__/detectors_resnext.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/roi_heads/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .fcn_mask_head import FCNMaskHead 3 | 4 | __all__ = [ 5 | 'FCNMaskHead', 6 | ] 7 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/region_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/region_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/sim_ota_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/sim_ota_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/uniform_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/uniform_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/bucketing_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/bucketing_bbox_coder.cpython-37.pyc 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/delta_xywh_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/delta_xywh_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/score_hlr_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/score_hlr_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/hungarian_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/hungarian_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/mask_pseudo_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/mask_pseudo_sampler.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/mask_sampling_result.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/mask_sampling_result.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/center_region_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/center_region_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/task_aligned_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/task_aligned_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/distance_point_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/distance_point_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/iou2d_calculator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/iou_calculators/__pycache__/iou2d_calculator.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/approx_max_iou_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/approx_max_iou_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__pycache__/mask_hungarian_assigner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/assigners/__pycache__/mask_hungarian_assigner.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__pycache__/legacy_delta_xywh_bbox_coder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/coder/__pycache__/legacy_delta_xywh_bbox_coder.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heshitian/PLUG/HEAD/PLUG-Det/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-37.pyc -------------------------------------------------------------------------------- 
/PLUG-Det/mmdet/core/data_structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .general_data import GeneralData 3 | from .instance_data import InstanceData 4 | 5 | __all__ = ['GeneralData', 'InstanceData'] 6 | -------------------------------------------------------------------------------- /PLUG-Det/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements/*.txt 2 | include mmdet/VERSION 3 | include mmdet/.mim/model-index.yml 4 | include mmdet/.mim/demo/*/* 5 | recursive-include mmdet/.mim/configs *.py *.yml 6 | recursive-include mmdet/.mim/tools *.sh *.py 7 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/docs.txt: -------------------------------------------------------------------------------- 1 | docutils==0.16.0 2 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme 3 | recommonmark 4 | sphinx==4.0.2 5 | sphinx-copybutton 6 | sphinx_markdown_tables 7 | sphinx_rtd_theme==0.5.2 8 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base_bbox_coder import BaseBBoxCoder 3 | from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder 4 | 5 | __all__ = [ 6 | 'BaseBBoxCoder', 'DeltaXYWHBBoxCoder', 7 | ] 8 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base_roi_extractor import BaseRoIExtractor 3 | from .single_level_roi_extractor import SingleRoIExtractor 4 | 5 | __all__ = ['BaseRoIExtractor', 'SingleRoIExtractor'] 6 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .builder import build_iou_calculator 3 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps 4 | 5 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] 6 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .assign_result import AssignResult 3 | from .base_assigner import BaseAssigner 4 | from .max_iou_assigner import MaxIoUAssigner 5 | 6 | __all__ = [ 7 | 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', 8 | ] 9 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base_sampler import BaseSampler 3 | from .random_sampler import RandomSampler 4 | from .sampling_result import SamplingResult 5 | 6 | __all__ = [ 7 | 'BaseSampler', 'RandomSampler','SamplingResult', 8 | ] 9 | -------------------------------------------------------------------------------- /PLUG-Det/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29500} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /PLUG-Det/CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - name: "MMDetection Contributors" 5 | title: "OpenMMLab Detection Toolbox and Benchmark" 6 | date-released: 2018-08-22 7 | url: "https://github.com/open-mmlab/mmdetection" 8 | license: Apache-2.0 9 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/datasets/api_wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .coco_api import COCO, COCOeval 3 | from .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core 4 | 5 | __all__ = [ 6 | 'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core' 7 | ] 8 | -------------------------------------------------------------------------------- /PLUG-Det/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_dota_c.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/dota_instance.py', 4 | '../_base_/schedules/schedule_1x.py', 5 | '../_base_/default_runtime.py' 6 | ] 7 | work_dir = '/media/h/M/P2B/1dota/fuse_r0/0new/jdbj-0.3/mask_rcnn_r50/' 8 | -------------------------------------------------------------------------------- /PLUG-Det/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --xdoctest --xdoctest-style=auto 3 | norecursedirs = .git ignore build __pycache__ data docker docs .eggs 4 | 5 | filterwarnings= default 6 | ignore:.*No cfgstr given in Cacher constructor or call.*:Warning 7 | ignore:.*Define the __nice__ method for.*:Warning 8 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/panoptic_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | # A custom value to distinguish instance ID and category ID; need to 3 | # be greater than the number of categories. 
4 | # For a pixel in the panoptic result map: 5 | # pan_id = ins_id * INSTANCE_OFFSET + cat_id 6 | INSTANCE_OFFSET = 1000 7 | -------------------------------------------------------------------------------- /PLUG-Det/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | PORT=${PORT:-29500} 7 | 8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 9 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 10 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 11 | -------------------------------------------------------------------------------- /PLUG-Det/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | interrogate 5 | isort==4.3.21 6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 7 | kwarray 8 | -e git+https://github.com/open-mmlab/mmtracking#egg=mmtrack 9 | onnx==1.7.0 10 | onnxruntime>=1.8.0 11 | pytest 12 | ubelt 13 | xdoctest>=0.10.0 14 | yapf 15 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/datasets/pipelines/formating.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | # flake8: noqa 3 | import warnings 4 | 5 | from .formatting import * 6 | 7 | warnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be ' 8 | 'deprecated, please replace it with ' 9 | 'mmdet.datasets.pipelines.formatting.') 10 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmcv.utils import Registry, build_from_cfg 3 | 4 | IOU_CALCULATORS = Registry('IoU calculator') 5 | 6 | 7 | def build_iou_calculator(cfg, default_args=None): 8 | """Builder of IoU calculator.""" 9 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 10 | -------------------------------------------------------------------------------- /PLUG-Det/configs/_base_/schedules/schedule_1x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[8, 11]) 11 | runner = dict(type='EpochBasedRunner', max_epochs=12) 12 | -------------------------------------------------------------------------------- /PLUG-Det/configs/_base_/schedules/schedule_20e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[16, 19]) 11 | runner = dict(type='EpochBasedRunner', max_epochs=20) 12 | -------------------------------------------------------------------------------- /PLUG-Det/configs/_base_/schedules/schedule_2x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[16, 22]) 11 | runner = dict(type='EpochBasedRunner', max_epochs=24) 12 | 
-------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .mask_target import mask_target 3 | from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks 4 | from .utils import encode_mask_results, split_combined_polys 5 | 6 | __all__ = [ 7 | 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 8 | 'PolygonMasks', 'encode_mask_results' 9 | ] 10 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .collect_env import collect_env 3 | from .logger import get_root_logger 4 | from .misc import find_latest_checkpoint 5 | from .setup_env import setup_multi_processes 6 | from .optimizer import DistOptimizerHook 7 | __all__ = [ 8 | 'get_root_logger', 'collect_env', 'find_latest_checkpoint', 9 | 'setup_multi_processes','DistOptimizerHook' 10 | ] 11 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .distributed_sampler import DistributedSampler 3 | from .group_sampler import DistributedGroupSampler, GroupSampler 4 | from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler 5 | 6 | __all__ = [ 7 | 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler', 8 | 'InfiniteGroupBatchSampler', 'InfiniteBatchSampler' 9 | ] 10 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d 3 | from .builder import build_linear_layer, build_transformer 4 | from .res_layer import ResLayer, SimplifiedBasicBlock 5 | 6 | __all__ = [ 7 | 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 8 | 'build_transformer', 'build_linear_layer', 9 | 'ResLayer', 'SimplifiedBasicBlock', 10 | ] 11 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .anchor import * # noqa: F401, F403 3 | from .bbox import * # noqa: F401, F403 4 | from .data_structures import * # noqa: F401, F403 5 | from .evaluation import * # noqa: F401, F403 6 | from .hook import * # noqa: F401, F403 7 | from .mask import * # noqa: F401, F403 8 | from .post_processing import * # noqa: F401, F403 9 | from .utils import * # noqa: F401, F403 10 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from abc import ABCMeta, abstractmethod 3 | 4 | 5 | class BaseAssigner(metaclass=ABCMeta): 6 | """Base assigner that assigns boxes to ground truth boxes.""" 7 | 8 | @abstractmethod 9 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 10 | """Assign boxes to either a ground truth boxes or a negative boxes.""" 11 | -------------------------------------------------------------------------------- /PLUG-Det/tools/tide.py: -------------------------------------------------------------------------------- 1 | from tidecv import TIDE, datasets 2 | 3 | tide = TIDE() 4 | tide.evaluate(datasets.COCO(path='/media/h/M/dataset/AITOD/annotations/aitod_test_v1.json'), datasets.COCOResult('/media/h/M/dataset/AITOD/work_dirs/centernet/results_val1.bbox.json'), mode=TIDE.BOX) # Use TIDE.MASK for masks 5 | tide.summarize() # Summarize the results as tables in the console 6 | tide.plot() # Show a summary figure. Specify a folder and it'll output a png to that folder. 7 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .image import (color_val_matplotlib, imshow_det_bboxes, 3 | imshow_gt_det_bboxes, imshow_bboxes_points, imshow_det_bboxes_cp) 4 | from .palette import get_palette, palette_val 5 | 6 | __all__ = [ 7 | 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', 8 | 'palette_val', 'get_palette','imshow_bboxes_points','imshow_det_bboxes_cp' 9 | ] 10 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .bbox_head import BBoxHead 3 | from .MIL_bbox_head import Shared2FCInstanceMILHead 4 | from .wsddn_head import WSDDNHead 5 | from .oicr_head import OICRHead 6 | from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, 7 | Shared4Conv1FCBBoxHead) 8 | __all__ = [ 9 | 'BBoxHead', 'Shared2FCInstanceMILHead','WSDDNHead','OICRHead','ConvFCBBoxHead','Shared2FCBBoxHead','Shared4Conv1FCBBoxHead' 10 | ] 11 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/set_epoch_info_hook.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.parallel import is_module_wrapper 3 | from mmcv.runner import HOOKS, Hook 4 | 5 | 6 | @HOOKS.register_module() 7 | class SetEpochInfoHook(Hook): 8 | """Set runner's epoch information to the model.""" 9 | 10 | def before_train_epoch(self, runner): 11 | epoch = runner.epoch 12 | model = runner.model 13 | if is_module_wrapper(model): 14 | model = model.module 15 | model.set_epoch(epoch) 16 | 17 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmcv.utils import collect_env as collect_base_env 3 | from mmcv.utils import get_git_hash 4 | 5 | import mmdet 6 | 7 | 8 | def collect_env(): 9 | """Collect the information of the running environments.""" 10 | env_info = collect_base_env() 11 | env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] 12 | return env_info 13 | 14 | 15 | if __name__ == '__main__': 16 | for name, val in collect_env().items(): 17 | print(f'{name}: {val}') 18 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .bbox_nms import fast_nms, multiclass_nms, nms_with_others,WeaklyMulticlassNMS 3 | from .matrix_nms import mask_matrix_nms 4 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 5 | merge_aug_proposals, merge_aug_scores) 6 | 7 | __all__ = [ 8 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 9 | 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms', 10 | 'nms_with_others','WeaklyMulticlassNMS' 11 | ] 12 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base import BaseDetector 3 | from .faster_rcnn import FasterRCNN 4 | from .mask_rcnn import MaskRCNN 5 | from .rpn import RPN 6 | from .single_stage import SingleStageDetector 7 | from .two_stage import TwoStageDetector 8 | from .P2BNet import P2BNet 9 | from .weak_rcnn import WeakRCNN 10 | from .PLUG import PLUG 11 | __all__ = [ 12 | 'BaseDetector','FasterRCNN','MaskRCNN', 13 | 'SingleStageDetector', 'TwoStageDetector', 'RPN', 14 | 'P2BNet','WeakRCNN','PLUG', 15 | ] 16 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/coder/base_bbox_coder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from abc import ABCMeta, abstractmethod 3 | 4 | 5 | class BaseBBoxCoder(metaclass=ABCMeta): 6 | """Base bounding box coder.""" 7 | 8 | def __init__(self, **kwargs): 9 | pass 10 | 11 | @abstractmethod 12 | def encode(self, bboxes, gt_bboxes): 13 | """Encode deltas between bboxes and ground truth boxes.""" 14 | 15 | @abstractmethod 16 | def decode(self, bboxes, bboxes_pred): 17 | """Decode the predicted bboxes according to prediction and base 18 | boxes.""" 19 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/set_iter_info_hook.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmcv.parallel import is_module_wrapper 3 | from mmcv.runner import HOOKS, Hook 4 | 5 | 6 | @HOOKS.register_module() 7 | class SetIterInfoHook(Hook): 8 | """Set runner's iteration information to the model.""" 9 | 10 | def before_train_iter(self, runner): 11 | iter = runner.iter 12 | inner_iter = runner.inner_iter 13 | model = runner.model 14 | if is_module_wrapper(model): 15 | model = model.module 16 | model.set_iter(iter) 17 | model.set_inner_iter(inner_iter) 18 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | 3 | __version__ = '2.22.0' 4 | short_version = __version__ 5 | 6 | 7 | def parse_version_info(version_str): 8 | version_info = [] 9 | for x in version_str.split('.'): 10 | if x.isdigit(): 11 | version_info.append(int(x)) 12 | elif x.find('rc') != -1: 13 | patch_version = x.split('rc') 14 | version_info.append(int(patch_version[0])) 15 | version_info.append(f'rc{patch_version[1]}') 16 | return tuple(version_info) 17 | 18 | 19 | version_info = parse_version_info(__version__) 20 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/utils/logger.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import logging 3 | 4 | from mmcv.utils import get_logger 5 | 6 | 7 | def get_root_logger(log_file=None, log_level=logging.INFO): 8 | """Get root logger. 9 | 10 | Args: 11 | log_file (str, optional): File path of log. Defaults to None. 12 | log_level (int, optional): The level of logger. 13 | Defaults to logging.INFO.
14 | 15 | Returns: 16 | :obj:`logging.Logger`: The obtained logger 17 | """ 18 | logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) 19 | 20 | return logger 21 | -------------------------------------------------------------------------------- /PLUG-Det/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | checkpoint_config = dict(interval=1) 2 | # yapf:disable 3 | log_config = dict( 4 | interval=50, 5 | hooks=[ 6 | dict(type='TextLoggerHook'), 7 | dict(type='TensorboardLoggerHook') 8 | ]) 9 | # yapf:enable 10 | custom_hooks = [dict(type='NumClassCheckHook')] 11 | 12 | dist_params = dict(backend='nccl') 13 | log_level = 'INFO' 14 | load_from = None 15 | resume_from = None 16 | workflow = [('train', 1)] 17 | 18 | # disable opencv multithreading to avoid system being overloaded 19 | opencv_num_threads = 0 20 | # set multi-process start method as `fork` to speed up the training 21 | mp_start_method = 'fork' 22 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, 3 | reduce_mean) 4 | from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, 5 | generate_coordinate, mask2ndarray, multi_apply, 6 | select_single_mlvl, unmap) 7 | 8 | __all__ = [ 9 | 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 10 | 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 11 | 'center_of_mass', 'generate_coordinate', 'select_single_mlvl', 12 | 'filter_scores_and_topk' 13 | ] 14 | -------------------------------------------------------------------------------- /PLUG-Det/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_dota.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/faster_rcnn_r50_fpn.py', 3 | '../_base_/datasets/dota_detection.py', 4 | '../_base_/schedules/schedule_1x.py', 5 | '../_base_/default_runtime.py' 6 | ] 7 | checkpoint_config = dict(interval=12) 8 | evaluation = dict(interval=12, metric='bbox',classwise = True) 9 | work_dir = '/media/h/M/P2B/1dota/P2B_work_dirs/work_dirs/P2B_DOTA_1024_0.0005_stage2_basescales0_bs2/faster-rcnn' 10 | lr_config = dict( 11 | policy='step', 12 | warmup='linear', 13 | warmup_iters=500, 14 | warmup_ratio=0.001, 15 | step=[8, 11]) 16 | runner = dict(type='EpochBasedRunner', max_epochs=12) -------------------------------------------------------------------------------- /PLUG-Det/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .inference import (async_inference_detector, inference_detector, 3 | init_detector, show_result_pyplot) 4 | from .test import multi_gpu_test, single_gpu_test, single_gpu_test_twomodel 5 | from .train import (get_root_logger, init_random_seed, set_random_seed, 6 | train_detector) 7 | 8 | __all__ = [ 9 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 10 | 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 11 | 'multi_gpu_test', 'single_gpu_test', 'init_random_seed','single_gpu_test_twomodel' 12 | ] 13 | -------------------------------------------------------------------------------- /PLUG-Det/data_process/bbox2json.py: -------------------------------------------------------------------------------- 1 | import os 2 | import mmcv 3 | import json 4 | meta_dir = '/media/h/M/P2B/1dota/PLUG-res50-fuse/' 5 | bbox_json_file = os.path.join(meta_dir, 'results.bbox.json') 6 | ref_json_file = '/media/h/H/DOTA10_512_128/annotations/DOTA_train_512.json' 7 | result_json_file = os.path.join(meta_dir, 'DOTA_train_512.json') 8 | ref_json = mmcv.load(ref_json_file) 9 | bbox_all = mmcv.load(bbox_json_file) 10 | for id, bbox_info in enumerate(bbox_all): 11 | bbox_info['area'] = bbox_info['bbox'][2]*bbox_info['bbox'][3] 12 | bbox_info['iscrowd'] = 0 13 | bbox_info['id'] = id+1 14 | ref_json['annotations'] = bbox_all 15 | mmcv.dump(ref_json, result_json_file) -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import warnings 3 | 4 | from mmcv.utils import Registry, build_from_cfg 5 | 6 | PRIOR_GENERATORS = Registry('Generator for anchors and points') 7 | 8 | ANCHOR_GENERATORS = PRIOR_GENERATORS 9 | 10 | 11 | def build_prior_generator(cfg, default_args=None): 12 | return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) 13 | 14 | 15 | def build_anchor_generator(cfg, default_args=None): 16 | warnings.warn( 17 | '``build_anchor_generator`` would be deprecated soon, please use ' 18 | '``build_prior_generator`` ') 19 | return build_prior_generator(cfg, default_args=default_args) 20 | -------------------------------------------------------------------------------- /PLUG-Det/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /PLUG-Det/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${@:5} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | 
--gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /PLUG-Det/data_process/segm2json.py: -------------------------------------------------------------------------------- 1 | import os 2 | import mmcv 3 | from tqdm import tqdm 4 | 5 | meta_dir = '/media/h/M/P2B/1dota/PLUG-res50-fuse/' 6 | bbox_json_file = os.path.join(meta_dir, 'results.segm.json') 7 | ref_json_file = '/media/h/H/DOTA10_512_128/annotations/DOTA_train_512_coarse_seg.json' 8 | result_json_file = os.path.join(meta_dir, 'DOTA_train_512_seg.json') 9 | ref_json = mmcv.load(ref_json_file) 10 | bbox_all = mmcv.load(bbox_json_file) 11 | for id, bbox_info in tqdm(enumerate(bbox_all)): 12 | bbox_info['area'] = bbox_info['bbox'][2]*bbox_info['bbox'][3] 13 | bbox_info['iscrowd'] = 0 14 | bbox_info['id'] = id+1 15 | ref_json['annotations'] = bbox_all 16 | mmcv.dump(ref_json, result_json_file) -------------------------------------------------------------------------------- /PLUG-Det/setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | extra_standard_library = setuptools 5 | known_first_party = mmdet 6 | known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml 7 | no_lines_before = STDLIB,LOCALFOLDER 8 | default_section = THIRDPARTY 9 | 10 | [yapf] 11 | BASED_ON_STYLE = pep8 12 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 13 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 14 | 15 | [codespell] 16 | skip = *.ipynb 17 | quiet-level = 3 18 | ignore-words-list = 
patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood 19 | -------------------------------------------------------------------------------- /PLUG-Det/configs_wsod/oicr_vgg16.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | # model settings 3 | model = dict( 4 | type='WeakRCNN', 5 | pretrained=None, 6 | backbone=dict(type='VGG16'), 7 | neck=None, 8 | roi_head=dict( 9 | type='OICRRoIHead', 10 | bbox_roi_extractor=dict( 11 | type='SingleRoIExtractor', 12 | roi_layer=dict(type='RoIPool', output_size=7), 13 | out_channels=512, 14 | featmap_strides=[8]), 15 | bbox_head=dict( 16 | type='OICRHead', 17 | in_channels=512, 18 | hidden_channels=4096, 19 | roi_feat_size=7, 20 | num_classes=20)) 21 | ) 22 | work_dir = '/media/h/M/P2B/1dota/WSOD/oicr_vgg16/' 23 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import Registry, build_from_cfg 3 | 4 | BBOX_ASSIGNERS = Registry('bbox_assigner') 5 | BBOX_SAMPLERS = Registry('bbox_sampler') 6 | BBOX_CODERS = Registry('bbox_coder') 7 | 8 | 9 | def build_assigner(cfg, **default_args): 10 | """Builder of box assigner.""" 11 | return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) 12 | 13 | 14 | def build_sampler(cfg, **default_args): 15 | """Builder of box sampler.""" 16 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) 17 | 18 | 19 | def build_bbox_coder(cfg, **default_args): 20 | """Builder of box coder.""" 21 | return build_from_cfg(cfg, BBOX_CODERS, default_args) 22 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .checkloss_hook import CheckInvalidLossHook 3 | from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook 4 | from .set_epoch_info_hook import SetEpochInfoHook 5 | from .sync_norm_hook import SyncNormHook 6 | from .sync_random_size_hook import SyncRandomSizeHook 7 | from .yolox_lrupdater_hook import YOLOXLrUpdaterHook 8 | from .yolox_mode_switch_hook import YOLOXModeSwitchHook 9 | from .set_iter_info_hook import SetIterInfoHook 10 | __all__ = [ 11 | 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', 12 | 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', 13 | 'CheckInvalidLossHook', 'SetEpochInfoHook','SetIterInfoHook' 14 | ] 15 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, 3 | YOLOAnchorGenerator) 4 | from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, 5 | build_anchor_generator, build_prior_generator) 6 | from .point_generator import MlvlPointGenerator, PointGenerator 7 | from .utils import anchor_inside_flags, calc_region, images_to_levels 8 | 9 | __all__ = [ 10 | 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 11 | 'PointGenerator', 'images_to_levels', 'calc_region', 12 | 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', 13 | 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator' 14 | ] 15 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/losses/expand_onehot_labels.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from ..builder import LOSSES 6 | from .utils import weight_reduce_loss 7 | 8 | def 
_expand_onehot_labels(labels, label_weights, label_channels): 9 | bin_labels = labels.new_full((labels.size(0), label_channels), 0) 10 | inds = torch.nonzero( 11 | (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() 12 | if inds.numel() > 0: 13 | bin_labels[inds, labels[inds]] = 1 14 | 15 | if label_weights is None: 16 | bin_label_weights = None 17 | else: 18 | bin_label_weights = label_weights.view(-1, 1).expand( 19 | label_weights.size(0), label_channels) 20 | 21 | return bin_labels, bin_label_weights 22 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/hook/checkloss_hook.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.runner.hooks import HOOKS, Hook 4 | 5 | 6 | @HOOKS.register_module() 7 | class CheckInvalidLossHook(Hook): 8 | """Check invalid loss hook. 9 | 10 | This hook will regularly check whether the loss is valid 11 | during training. 12 | 13 | Args: 14 | interval (int): Checking interval (every k iterations). 15 | Default: 50. 16 | """ 17 | 18 | def __init__(self, interval=50): 19 | self.interval = interval 20 | 21 | def after_train_iter(self, runner): 22 | if self.every_n_iters(runner, self.interval): 23 | assert torch.isfinite(runner.outputs['loss']), \ 24 | 'loss become infinite or NaN!' 25 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_roi_head import BaseRoIHead 3 | from .bbox_heads import(BBoxHead, 4 | Shared2FCBBoxHead) 5 | 6 | from .mask_heads import ( FCNMaskHead,) 7 | from .roi_extractors import (BaseRoIExtractor, 8 | SingleRoIExtractor) 9 | from .standard_roi_head import StandardRoIHead 10 | from .P2B_head import P2BHead 11 | from .wsddn_roi_head import WSDDNRoIHead 12 | from .oicr_roi_head import OICRRoIHead 13 | from .wsod2_roi_head import WSOD2RoIHead 14 | __all__ = [ 15 | 'BaseRoIHead', 'BBoxHead', 16 | 'Shared2FCBBoxHead', 17 | 'StandardRoIHead', 18 | 'FCNMaskHead', 'BaseRoIExtractor', 19 | 'SingleRoIExtractor', 20 | 'P2BHead','WSDDNRoIHead','OICRRoIHead','WSOD2RoIHead' 21 | ] 22 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .backbones import * # noqa: F401,F403 3 | from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 4 | ROI_EXTRACTORS, SHARED_HEADS, build_backbone, 5 | build_detector, build_head, build_loss, build_neck, 6 | build_roi_extractor, build_shared_head) 7 | from .dense_heads import * # noqa: F401,F403 8 | from .detectors import * # noqa: F401,F403 9 | from .losses import * # noqa: F401,F403 10 | from .necks import * # noqa: F401,F403 11 | from .roi_heads import * # noqa: F401,F403 12 | __all__ = [ 13 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 14 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 15 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 16 | ] 17 | -------------------------------------------------------------------------------- /PLUG-Det/configs_wsod/oicr_bbox_vgg16.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | # model settings 3 | model = dict( 4 | type='WeakRCNN', 5 | pretrained=None, 
6 | backbone=dict(type='VGG16'), 7 | neck=None, 8 | roi_head=dict( 9 | type='OICRRoIHead', 10 | bbox_roi_extractor=dict( 11 | type='SingleRoIExtractor', 12 | roi_layer=dict(type='RoIPool', output_size=7), 13 | out_channels=512, 14 | featmap_strides=[8]), 15 | bbox_head=dict( 16 | type='OICRHead', 17 | in_channels=512, 18 | hidden_channels=4096, 19 | roi_feat_size=7, 20 | bbox_coder=dict( 21 | type='DeltaXYWHBBoxCoder', 22 | target_means=[0., 0., 0., 0.], 23 | target_stds=[0.1, 0.1, 0.2, 0.2]), 24 | num_classes=20)) 25 | ) 26 | work_dir = '/media/h/M/P2B/1dota/WSOD/oicr_bbox_vgg16/' 27 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..builder import DETECTORS 3 | from .two_stage import TwoStageDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class MaskRCNN(TwoStageDetector): 8 | """Implementation of `Mask R-CNN `_""" 9 | 10 | def __init__(self, 11 | backbone, 12 | rpn_head, 13 | roi_head, 14 | train_cfg, 15 | test_cfg, 16 | neck=None, 17 | pretrained=None, 18 | init_cfg=None): 19 | super(MaskRCNN, self).__init__( 20 | backbone=backbone, 21 | neck=neck, 22 | rpn_head=rpn_head, 23 | roi_head=roi_head, 24 | train_cfg=train_cfg, 25 | test_cfg=test_cfg, 26 | pretrained=pretrained, 27 | init_cfg=init_cfg) 28 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from ..builder import DETECTORS 3 | from .two_stage import TwoStageDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class FasterRCNN(TwoStageDetector): 8 | """Implementation of `Faster R-CNN `_""" 9 | 10 | def __init__(self, 11 | backbone, 12 | rpn_head, 13 | roi_head, 14 | train_cfg, 15 | test_cfg, 16 | neck=None, 17 | pretrained=None, 18 | init_cfg=None): 19 | super(FasterRCNN, self).__init__( 20 | backbone=backbone, 21 | neck=neck, 22 | rpn_head=rpn_head, 23 | roi_head=roi_head, 24 | train_cfg=train_cfg, 25 | test_cfg=test_cfg, 26 | pretrained=pretrained, 27 | init_cfg=init_cfg) 28 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/dense_heads/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Author: error: git config user.name && git config user.email & please set dead value or install git 3 | Date: 2022-04-28 19:28:13 4 | LastEditors: error: git config user.name && git config user.email & please set dead value or install git 5 | LastEditTime: 2022-06-28 09:57:58 6 | FilePath: /mmdetection-2.22.0/mmdet/models/dense_heads/__init__.py 7 | Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE 8 | ''' 9 | # Copyright (c) OpenMMLab. All rights reserved. 10 | from .anchor_free_head import AnchorFreeHead 11 | from .anchor_head import AnchorHead 12 | from .rpn_head import RPNHead 13 | from .base_dense_head import BaseDenseHead 14 | from .dense_test_mixins import BBoxTestMixin 15 | from .plug_head import PLUGHead 16 | __all__ = [ 17 | 'AnchorFreeHead', 'AnchorHead', 18 | 'RPNHead','PLUGHead','BaseDenseHead','BBoxTestMixin' 19 | ] 20 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. 
All rights reserved. 2 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 3 | # from .cityscapes import CityscapesDataset 4 | from .coco import CocoDataset 5 | from .custom import CustomDataset 6 | from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, 7 | MultiImageMixDataset, RepeatDataset) 8 | from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler 9 | from .utils import (NumClassCheckHook, get_loading_pipeline, 10 | replace_ImageToTensor) 11 | from .dota import DOTADataset 12 | from .coco_cp import CocoCPDataset 13 | __all__ = [ 14 | 'CustomDataset', 'CocoDataset', 15 | 'ClassBalancedDataset', 'ConcatDataset', 16 | 'MultiImageMixDataset', 'RepeatDataset', 17 | 'DistributedGroupSampler', 'DistributedSampler', 'GroupSampler', 18 | 'NumClassCheckHook', 'get_loading_pipeline', 'replace_ImageToTensor', 19 | 'DOTADataset','CocoCPDataset', 20 | ] 21 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import mmcv 3 | 4 | from .version import __version__, short_version 5 | 6 | 7 | def digit_version(version_str): 8 | digit_version = [] 9 | for x in version_str.split('.'): 10 | if x.isdigit(): 11 | digit_version.append(int(x)) 12 | elif x.find('rc') != -1: 13 | patch_version = x.split('rc') 14 | digit_version.append(int(patch_version[0]) - 1) 15 | digit_version.append(int(patch_version[1])) 16 | return digit_version 17 | 18 | 19 | mmcv_minimum_version = '1.3.17' 20 | mmcv_maximum_version = '1.5.0' 21 | mmcv_version = digit_version(mmcv.__version__) 22 | 23 | 24 | assert (mmcv_version >= digit_version(mmcv_minimum_version) 25 | and mmcv_version <= digit_version(mmcv_maximum_version)), \ 26 | f'MMCV=={mmcv.__version__} is used but incompatible. 
' \ 27 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' 28 | 29 | __all__ = ['__version__', 'short_version'] 30 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, 3 | get_classes, imagenet_det_classes, 4 | imagenet_vid_classes, oid_challenge_classes, 5 | oid_v6_classes, voc_classes) 6 | from .eval_hooks import DistEvalHook, EvalHook 7 | from .mean_ap import average_precision, eval_map, print_map_summary 8 | from .panoptic_utils import INSTANCE_OFFSET 9 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 10 | print_recall_summary) 11 | 12 | __all__ = [ 13 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 14 | 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 15 | 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 16 | 'print_map_summary', 'eval_recalls', 'print_recall_summary', 17 | 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes', 18 | 'oid_challenge_classes', 'INSTANCE_OFFSET' 19 | ] 20 | -------------------------------------------------------------------------------- /PLUG-Det/DetVisGUI/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Chien-Hung 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 
11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/utils/util_random.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | """Helpers for random number generators.""" 3 | import numpy as np 4 | 5 | 6 | def ensure_rng(rng=None): 7 | """Coerces input into a random number generator. 8 | 9 | If the input is None, then a global random state is returned. 10 | 11 | If the input is a numeric value, then that is used as a seed to construct a 12 | random state. Otherwise the input is returned as-is. 13 | 14 | Adapted from [1]_. 15 | 16 | Args: 17 | rng (int | numpy.random.RandomState | None): 18 | if None, then defaults to the global rng. Otherwise this can be an 19 | integer or a RandomState class 20 | Returns: 21 | (numpy.random.RandomState) : rng - 22 | a numpy random number generator 23 | 24 | References: 25 | .. 
[1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 26 | """ 27 | 28 | if rng is None: 29 | rng = np.random.mtrand._rand 30 | elif isinstance(rng, int): 31 | rng = np.random.RandomState(rng) 32 | else: 33 | rng = rng 34 | return rng 35 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .assigners import (AssignResult, BaseAssigner, MaxIoUAssigner) 3 | from .builder import build_assigner, build_bbox_coder, build_sampler 4 | from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder) 5 | from .iou_calculators import BboxOverlaps2D, bbox_overlaps 6 | from .samplers import (BaseSampler, RandomSampler, SamplingResult) 7 | from .transforms import (bbox2distance, bbox2result, bbox2roi, 8 | bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, 9 | bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, 10 | distance2bbox, find_inside_bboxes, roi2bbox) 11 | 12 | __all__ = ['AssignResult', 'BaseAssigner', 'MaxIoUAssigner', 13 | 'build_assigner', 'build_bbox_coder', 'build_sampler', 14 | 'BaseBBoxCoder', 'DeltaXYWHBBoxCoder', 15 | 'BboxOverlaps2D', 'bbox_overlaps', 16 | 'BaseSampler', 'RandomSampler', 'SamplingResult', 17 | 'bbox2distance', 'bbox2result', 'bbox2roi', 'bbox_cxcywh_to_xyxy', 18 | 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', 'bbox_rescale', 19 | 'bbox_xyxy_to_cxcywh', 'distance2bbox', 'find_inside_bboxes', 'roi2bbox' 20 | ] 21 | -------------------------------------------------------------------------------- /PLUG-Det/data_process/calculate_mIoU.py: -------------------------------------------------------------------------------- 1 | import os 2 | import mmcv 3 | import json 4 | import numpy as np 5 | meta_dir = '/media/h/M/P2B/1dota/fuse_r0/seg/1.5/' 6 | bbox_json_file = os.path.join(meta_dir, 
def find_latest_checkpoint(path, suffix='pth'):
    """Find the latest checkpoint from the working directory.

    Checkpoints are expected to be named ``*_<number>.<suffix>`` (e.g.
    ``epoch_12.pth``). A ``latest.<suffix>`` file, if present, always wins.

    Args:
        path(str): The path to find checkpoints.
        suffix(str): File extension.
            Defaults to pth.

    Returns:
        latest_path(str | None): File path of the latest checkpoint, or
            None when the path does not exist or holds no checkpoints.
    References:
        .. [1] https://github.com/microsoft/SoftTeacher
                  /blob/main/ssod/utils/patch.py
    """
    if not osp.exists(path):
        warnings.warn('The path of checkpoints does not exist.')
        return None
    if osp.exists(osp.join(path, f'latest.{suffix}')):
        return osp.join(path, f'latest.{suffix}')

    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
    if len(checkpoints) == 0:
        warnings.warn('There are no checkpoints in the path.')
        return None
    latest = -1
    latest_path = None
    for checkpoint in checkpoints:
        # The epoch/iteration count is the trailing '_<number>' of the stem.
        stem = osp.basename(checkpoint).split('_')[-1].split('.')[0]
        if not stem.isdigit():
            # Fix: skip files such as 'best.pth' instead of raising
            # ValueError from int() as the original implementation did.
            continue
        count = int(stem)
        if count > latest:
            latest = count
            latest_path = checkpoint
    return latest_path
13 | 14 | References: 15 | https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 16 | 17 | Example: 18 | >>> num = 3 19 | >>> scale = 512 20 | >>> rng = 0 21 | >>> boxes = random_boxes(num, scale, rng) 22 | >>> print(boxes) 23 | tensor([[280.9925, 278.9802, 308.6148, 366.1769], 24 | [216.9113, 330.6978, 224.0446, 456.5878], 25 | [405.3632, 196.3221, 493.3953, 270.7942]]) 26 | """ 27 | rng = ensure_rng(rng) 28 | 29 | tlbr = rng.rand(num, 4).astype(np.float32) 30 | 31 | tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) 32 | tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) 33 | br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) 34 | br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) 35 | 36 | tlbr[:, 0] = tl_x * scale 37 | tlbr[:, 1] = tl_y * scale 38 | tlbr[:, 2] = br_x * scale 39 | tlbr[:, 3] = br_y * scale 40 | 41 | boxes = torch.from_numpy(tlbr) 42 | return boxes 43 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
if sys.version_info >= (3, 7):

    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation.

        Args:
            trace_name: Label printed first; groups related probes.
            name: Label for this particular probe.
            enabled (bool): When False the manager becomes a no-op.
            stream: CUDA stream the start event is recorded on; defaults
                to the current stream.
            end_stream: CUDA stream the end event is recorded on; defaults
                to ``stream``.
        """
        # No-op when disabled or when CUDA is unavailable (no GPU events
        # could be recorded anyway).
        if (not enabled) or not torch.cuda.is_available():
            yield
            return
        stream = stream if stream else torch.cuda.current_stream()
        end_stream = end_stream if end_stream else stream
        # CUDA events measure device-side time between their record points.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        stream.record_event(start)
        try:
            cpu_start = time.monotonic()
            yield
        finally:
            # Runs even if the wrapped block raises, so timing is always
            # reported.
            cpu_end = time.monotonic()
            end_stream.record_event(end)
            # Block the host until the end event has actually happened;
            # otherwise elapsed_time() would be invalid.
            end.synchronize()
            cpu_time = (cpu_end - cpu_start) * 1000
            gpu_time = start.elapsed_time(end)
            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
            # NOTE(review): end_stream is passed as a second positional
            # argument to print(), so the stream object itself is printed
            # after the message — confirm this is intentional and not a
            # leftover debug argument.
            print(msg, end_stream)
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a deterministic, epoch-seeded shuffle.

    Pads the index list by repetition so every replica receives exactly
    ``num_samples`` indices, even when the dataset is shorter than half of
    ``total_size``.
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for the compatibility from PyTorch 1.3+
        self.seed = seed if seed is not None else 0

    def __iter__(self):
        """Yield this replica's indices for the current epoch."""
        # Deterministic shuffle: reseeding from epoch + seed makes every
        # rank generate the identical permutation for a given epoch.
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.epoch + self.seed)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))

        # Pad by repeating the whole list until it covers total_size; this
        # also handles an index list shorter than half of total_size.
        repeats = math.ceil(self.total_size / len(order))
        order = (order * repeats)[:self.total_size]
        assert len(order) == self.total_size

        # Round-robin subsample: this rank takes every num_replicas-th entry.
        order = order[self.rank:self.total_size:self.num_replicas]
        assert len(order) == self.num_samples

        return iter(order)
MODELS = Registry('models', parent=MMCV_MODELS)

# Every component registry is an alias of the single MODELS registry, so a
# module registered once can be built as any component kind.
BACKBONES = MODELS
NECKS = MODELS
ROI_EXTRACTORS = MODELS
SHARED_HEADS = MODELS
HEADS = MODELS
LOSSES = MODELS
DETECTORS = MODELS


def build_backbone(cfg):
    """Build a backbone from its config dict."""
    return BACKBONES.build(cfg)


def build_neck(cfg):
    """Build a neck from its config dict."""
    return NECKS.build(cfg)


def build_roi_extractor(cfg):
    """Build a RoI extractor from its config dict."""
    return ROI_EXTRACTORS.build(cfg)


def build_shared_head(cfg):
    """Build a shared head from its config dict."""
    return SHARED_HEADS.build(cfg)


def build_head(cfg):
    """Build a head from its config dict."""
    return HEADS.build(cfg)


def build_loss(cfg):
    """Build a loss from its config dict."""
    return LOSSES.build(cfg)


def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector.

    ``train_cfg``/``test_cfg`` passed here (the legacy outer style) are
    forwarded as default args; the modern style nests them inside ``cfg``.
    Each setting may be supplied through at most one of the two routes.
    """
    legacy_cfg_given = train_cfg is not None or test_cfg is not None
    if legacy_cfg_given:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    if cfg.get('train_cfg') is not None:
        assert train_cfg is None, \
            'train_cfg specified in both outer field and model field '
    if cfg.get('test_cfg') is not None:
        assert test_cfg is None, \
            'test_cfg specified in both outer field and model field '
    defaults = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return DETECTORS.build(cfg, default_args=defaults)
class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class has
    the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        # The deprecated mmpycocotools fork reports versions >= 12.0.2 while
        # the official pycocotools is on the 2.x series. The original
        # lexicographic string comparison ('2.0.x' >= '12.0.2' is True
        # because '2' > '1') made the warning fire for the official package
        # too, so compare numeric version tuples instead.
        version = getattr(pycocotools, '__version__', '0')
        version_tuple = tuple(
            int(part) for part in version.split('.') if part.isdigit())
        if version_tuple >= (12, 0, 2):
            warnings.warn(
                'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"',  # noqa: E501
                UserWarning)
        super().__init__(annotation_file=annotation_file)
        # Snake-case aliases of pycocotools' camelCase index maps.
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Alias of ``getAnnIds``."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Alias of ``getCatIds``."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Alias of ``getImgIds``."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Alias of ``loadAnns``."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Alias of ``loadCats``."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Alias of ``loadImgs``."""
        return self.loadImgs(ids)
main(): 13 | ann_file ='/media/h/M/dataset/AITOD/annotations/aitod_train_v1.json' 14 | res_dir ='/media/h/M/dataset/AITOD/annotations/single/' 15 | 16 | data = open(ann_file, "r", encoding="utf-8") 17 | ann_dict = json.load(data) # 字典 18 | data.close() 19 | dicts_list = [] 20 | ann_image_ids = [] 21 | for class_idx, cat in enumerate(ann_dict['categories']): 22 | dict = {'annotations':[],'categories':[],'images':[]} 23 | dicts_list.append(dict) 24 | dicts_list[class_idx]['categories'].append(cat) 25 | dicts_list[class_idx]['categories'][0]['id']=1 26 | ann_image_ids.append([]) 27 | for idx, ann in enumerate(ann_dict['annotations']): 28 | dicts_list[ann['category_id']-1]['annotations'].append(ann) 29 | ann_image_ids[ann['category_id']-1].append( ann['image_id']) 30 | for idx in range(8): 31 | ann_image_ids[idx] = np.unique(ann_image_ids[idx] ) 32 | ann_image_ids[idx] .sort() 33 | for i in ann_image_ids[idx]: 34 | for image in ann_dict['images']: 35 | if image['id']==i: 36 | dicts_list[idx]['images'].append(image) 37 | res_file = res_dir + 'aitod_train_{}.json'.format(ann_dict['categories'][idx]['name']) 38 | with open (res_file,'w') as f: 39 | json.dump(dicts_list[idx] ,f) 40 | 41 | 42 | if __name__ == "__main__": 43 | main() -------------------------------------------------------------------------------- /PLUG-Det/mmdet/utils/optimizer.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Author: error: git config user.name && git config user.email & please set dead value or install git 3 | Date: 2022-06-19 21:46:51 4 | LastEditors: error: git config user.name && git config user.email & please set dead value or install git 5 | LastEditTime: 2022-06-20 09:16:59 6 | FilePath: /mmdetection-2.22.0/mmdet/utils/optimizer.py 7 | Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE 8 | ''' 9 | from mmcv.runner import OptimizerHook, HOOKS 10 | try: 11 | 
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook for distributed training.

    Accumulates gradients over ``update_interval`` iterations before each
    optimizer step, optionally scaling the loss through apex AMP.

    Args:
        update_interval (int): Number of iterations whose gradients are
            accumulated before one optimizer step. Default: 1.
        grad_clip (dict | None): Config forwarded to ``clip_grads``; None
            disables gradient clipping.
        coalesce (bool): Stored for OptimizerHook compatibility.
        bucket_size_mb (int): Stored for OptimizerHook compatibility.
        use_fp16 (bool): If True, back-propagate through
            ``apex.amp.scale_loss``. NOTE(review): apex is imported at
            module level inside a try/except, so enabling this without
            apex installed raises NameError at runtime — confirm apex is
            available before setting it.
    """

    def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.update_interval = update_interval
        self.use_fp16 = use_fp16

    def before_run(self, runner):
        """Clear gradients so the first accumulation window starts clean."""
        runner.optimizer.zero_grad()

    def after_train_iter(self, runner):
        """Backward the (scaled) loss; step every ``update_interval`` iters."""
        # Average the loss over the accumulation window so the summed
        # gradients match a single larger batch.
        runner.outputs['loss'] /= self.update_interval
        if self.use_fp16:
            with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            runner.outputs['loss'].backward()
        # Step and reset only at the end of each accumulation window.
        if self.every_n_iters(runner, self.update_interval):
            if self.grad_clip is not None:
                self.clip_grads(runner.model.parameters())
            runner.optimizer.step()
            runner.optimizer.zero_grad()
2 | from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, 3 | ContrastTransform, EqualizeTransform, Rotate, Shear, 4 | Translate) 5 | from .compose import Compose 6 | from .formatting import (Collect, DefaultFormatBundle, ImageToTensor, 7 | ToDataContainer, ToTensor, Transpose, to_tensor) 8 | from .instaboost import InstaBoost 9 | from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, 10 | LoadMultiChannelImageFromFiles, LoadPanopticAnnotations, 11 | LoadProposals) 12 | from .test_time_aug import MultiScaleFlipAug 13 | from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic, 14 | Normalize, Pad, PhotoMetricDistortion, RandomAffine, 15 | RandomCenterCropPad, RandomCrop, RandomFlip, 16 | RandomShift, Resize, SegRescale, YOLOXHSVRandomAug) 17 | 18 | __all__ = [ 19 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 20 | 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', 21 | 'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations', 22 | 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', 23 | 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', 24 | 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', 25 | 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', 26 | 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform', 27 | 'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp', 28 | 'RandomAffine', 'YOLOXHSVRandomAug' 29 | ] 30 | -------------------------------------------------------------------------------- /PLUG-Det/data_process/iSAID_generate_single_point_annotation.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import xml.dom.minidom 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | from collections import OrderedDict 7 | #!/usr/bin/env python3 8 | # -*- coding: utf-8 
def main():
    """Generate coarse single-point annotations for iSAID.

    For every annotation, samples one vertex of its first segmentation
    polygon as the 'point' label and attaches a fixed-size pseudo box
    centred on it, then writes the modified COCO-style JSON back out.
    """
    json_file = '/media/h/H/ISAID_512_128/annotations/ISAID_train_512.json'
    # Fix: close the input file deterministically (the original leaked the
    # handle returned by open()).
    with open(json_file) as fr:
        data = json.load(fr)
    new_data = data.copy()  # shallow copy: nested dicts are edited in place
    fake_wh = 8  # side length of the pseudo box centred on the point
    for data_image in new_data['images']:
        data_image['file_name'] = data_image['file_name'].replace('instance_id_RGB_', '')
    for data_ann in new_data['annotations']:
        # Coarse point: one vertex sampled uniformly from the first
        # segmentation polygon (flat [x0, y0, x1, y1, ...] list).
        # (A centre-point variant existed here as commented-out code and
        # was removed.)
        vertices = list(np.array(data_ann['segmentation'][0]).reshape(-1, 2))
        data_ann['point'] = random.sample(vertices, 1)[0].tolist()
        # Pseudo bbox in COCO [x, y, w, h] format centred on the point.
        data_ann['pseudo_bbox'] = [
            data_ann['point'][0] - fake_wh / 2,
            data_ann['point'][1] - fake_wh / 2, fake_wh, fake_wh
        ]
    # Fix: 'with' guarantees the JSON is flushed and the file closed; the
    # original never closed the output handle.
    out_path = '/media/h/H/ISAID_512_128/annotations/train_512_coarse.json'
    with open(out_path, "w") as out_file:
        json.dump(new_data, out_file)

if __name__ == '__main__':
    main()
12 | 13 | Args: 14 | transforms (Sequence[dict | callable]): Sequence of transform object or 15 | config dict to be composed. 16 | """ 17 | 18 | def __init__(self, transforms): 19 | assert isinstance(transforms, collections.abc.Sequence) 20 | self.transforms = [] 21 | for transform in transforms: 22 | if isinstance(transform, dict): 23 | transform = build_from_cfg(transform, PIPELINES) 24 | self.transforms.append(transform) 25 | elif callable(transform): 26 | self.transforms.append(transform) 27 | else: 28 | raise TypeError('transform must be callable or a dict') 29 | 30 | def __call__(self, data): 31 | """Call function to apply transforms sequentially. 32 | 33 | Args: 34 | data (dict): A result dict contains the data to transform. 35 | 36 | Returns: 37 | dict: Transformed data. 38 | """ 39 | 40 | for t in self.transforms: 41 | data = t(data) 42 | if data is None: 43 | return None 44 | return data 45 | 46 | def __repr__(self): 47 | format_string = self.__class__.__name__ + '(' 48 | for t in self.transforms: 49 | str_ = t.__repr__() 50 | if 'Compose(' in str_: 51 | str_ = str_.replace('\n', '\n ') 52 | format_string += '\n' 53 | format_string += f' {str_}' 54 | format_string += '\n)' 55 | return format_string 56 | -------------------------------------------------------------------------------- /PLUG-Det/tools/misc/print_config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
def parse_args():
    """Parse the config path plus optional dotted-key config overrides."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()

    # --options is the deprecated spelling of --cfg-options; they are
    # mutually exclusive, and the old one is folded into the new one.
    both_given = args.options and args.cfg_options
    if both_given:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args


def main():
    """Load the config, apply any overrides and pretty-print it."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    print(f'Config:\n{cfg.pretty_text}')


if __name__ == '__main__':
    main()
def build_linear_layer(cfg, *args, **kwargs):
    """Build linear layer.
    Args:
        cfg (None or dict): The linear layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an linear layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding linear layer.
        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
            method of the corresponding linear layer.
    Returns:
        nn.Module: Created linear layer.
    """
    # None falls back to a plain nn.Linear.
    if cfg is None:
        cfg_ = dict(type='Linear')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        cfg_ = cfg.copy()  # copy so the pop() below cannot mutate the caller's cfg

    layer_type = cfg_.pop('type')
    if layer_type not in LINEAR_LAYERS:
        raise KeyError(f'Unrecognized linear type {layer_type}')
    linear_cls = LINEAR_LAYERS.get(layer_type)

    # The remaining cfg entries become constructor keyword arguments.
    return linear_cls(*args, **kwargs, **cfg_)
def get_norm_states(module):
    """Collect the state-dict entries of every norm layer in ``module``.

    Returns an ``OrderedDict`` mapping ``<submodule_name>.<param_name>`` to
    the corresponding tensor for each ``_NormBase`` descendant (BatchNorm,
    SyncBN, ...).
    """
    norm_states = OrderedDict()
    for name, child in module.named_modules():
        if not isinstance(child, nn.modules.batchnorm._NormBase):
            continue
        for key, value in child.state_dict().items():
            norm_states[f'{name}.{key}'] = value
    return norm_states
def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    is_empty = input.numel() == 0
    if not (is_empty and obsolete_torch_version(TORCH_VERSION, (1, 9))):
        # Non-empty input (or new enough torch): defer to the stock op.
        return F.adaptive_avg_pool2d(input, output_size)
    # Old PyTorch cannot infer shapes through empty tensors, so build the
    # empty result by hand: (N, C) plus the target spatial size.
    if isinstance(output_size, int):
        output_size = [output_size, output_size]
    output_size = [*input.shape[:2], *output_size]
    return NewEmptyTensorOp.apply(input, output_size)
"""Plot per-category masked mean responses collected in .xls sheets.

Every workbook found in the glob below contributes one row: column 1 of
its first sheet (header cell skipped). Rows are sorted by their first
value, the first column becomes a 1-based category index on the x axis,
and the remaining columns are drawn as one line per category.

Fixes over the original: removed duplicate imports of numpy/matplotlib,
the unused ``mpl_toolkits.axisartist`` import and the unused
``index_label`` variable.
"""
import glob

import matplotlib.pyplot as plt
import numpy as np
import xlrd

xls_dir = glob.glob('/media/h/M/P2B/1dota/paper/dsp/excel/*.xls')
out_num = []
for xls_path in xls_dir:
    # Open the workbook and report its layout for debugging.
    xlsx = xlrd.open_workbook(xls_path)
    print('All sheets: %s' % xlsx.sheet_names())
    sheet1 = xlsx.sheets()[0]  # the data lives in the first sheet
    print('Sheet1 Name: %s\nSheet1 cols: %s\nSheet1 rows: %s' %
          (sheet1.name, sheet1.ncols, sheet1.nrows))
    # Column 1, skipping the header cell.
    out_num.append(sheet1.col_values(1)[1:])

out_num.sort()  # order rows by their first value (category index)
out_num = np.stack(out_num, 0)
out_num_x = out_num[:, 0].astype(int) + 1  # 1-based category positions
out_num_y = out_num[:, 1:]

name_list = ['PL', 'BD', 'BR', 'GTF', 'SV', 'LV', 'SH', 'TC', 'BC', 'ST',
             'SBF', 'RA', 'HB', 'SP', 'HC']
font_dict = {'family': 'Calibri', 'size': 32}

# Line chart: one line per category, with the category index as marker.
fig, ax = plt.subplots(figsize=(5, 5))
for i in out_num_x:
    marker = '$' + str(i) + '$'
    ax.plot(out_num_x, out_num_y[i - 1], label=name_list[i - 1],
            linewidth=5, marker=marker, markersize=20)
plt.legend(loc='upper right', ncol=8, fontsize=20)
plt.xlabel("Categories", fontdict=font_dict)
plt.ylabel("Masked mean response", fontdict=font_dict)
ax.set_xticks(out_num_x)
ax.set_xticklabels(name_list, fontsize=28)
index = np.around(np.arange(12) * 0.1, decimals=1)
ax.set_yticks(index)
ax.set_yticklabels(['0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7',
                    '0.8', '0.9', '1.0', ' '], fontsize=28)
plt.show()
fig.savefig('/media/h/M/P2B/1dota/paper/dsp/dsp.pdf', dpi=300,
            bbox_inches='tight', pad_inches=0)
def palette_val(palette):
    """Rescale an integer RGB palette into matplotlib's 0-1 float range.

    Args:
        palette List[tuple]: A list of 0-255 color tuples.

    Returns:
        List[tuple[float]]: A list of RGB matplotlib color tuples.
    """
    return [tuple(channel / 255 for channel in color) for color in palette]


def get_palette(palette, num_classes):
    """Resolve a palette specification into a per-class color list.

    Args:
        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
        num_classes (int): the number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    if isinstance(palette, list):
        dataset_palette = palette
    elif isinstance(palette, tuple):
        dataset_palette = [palette for _ in range(num_classes)]
    elif palette == 'random' or palette is None:
        # Draw reproducible colors without disturbing the global numpy
        # RNG state: save it, seed deterministically, then restore.
        saved_state = np.random.get_state()
        np.random.seed(42)
        random_colors = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(saved_state)
        dataset_palette = [tuple(color) for color in random_colors]
    elif palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        dataset_palette = CocoDataset.PALETTE
        if len(dataset_palette) < num_classes:
            dataset_palette = CocoPanopticDataset.PALETTE
    elif palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        dataset_palette = CityscapesDataset.PALETTE
    elif palette == 'voc':
        from mmdet.datasets import VOCDataset
        dataset_palette = VOCDataset.PALETTE
    elif mmcv.is_str(palette):
        # A named/hex color string: mmcv returns BGR, so flip to RGB.
        dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')

    assert len(dataset_palette) >= num_classes, \
        'The length of palette should not be less than `num_classes`.'
    return dataset_palette
def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Configures the multiprocessing start method and caps the OpenCV, OMP
    and MKL thread counts so that dataloader workers do not oversubscribe
    the machine.

    Args:
        cfg (mmcv.Config): Experiment config; reads ``mp_start_method``,
            ``opencv_num_threads`` and ``data.workers_per_gpu``.
    """
    # set multi-process start method as `fork` to speed up the training
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            # Fixed: the original concatenated the two sentences without a
            # separating space ("...`.It will be...").
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`. '
                f'It will be force set to `{mp_start_method}`. You can '
                f'change this behavior by changing `mp_start_method` in '
                f'your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
"""Dataset settings: CocoCPDataset on DOTA 512x512 crops (bbox only)."""
# dataset settings
dataset_type = 'CocoCPDataset'
data_root = '/media/h/H/DOTA10_512_128/'

# ImageNet statistics; images are converted to RGB before normalization.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    # gt_pseudo_bboxes carries the coarse point-to-box pseudo labels.
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_pseudo_bboxes']),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Train/val/test all point at the coarse train annotations.
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=0,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DOTA_train_512_coarse.json',
        img_prefix=data_root + 'train/images/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DOTA_train_512_coarse.json',
        img_prefix=data_root + 'train/images/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DOTA_train_512_coarse.json',
        img_prefix=data_root + 'train/images/',
        pipeline=test_pipeline))

evaluation = dict(metric=['bbox'])
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
    """Switch the mode of YOLOX during training.

    Near the end of training this hook disables the mosaic/mixup data
    augmentation and enables the additional L1 loss in the bbox head.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to close the data augmentation and switch to L1 loss.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of type string to be
            skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp')
    """

    def __init__(self,
                 num_last_epochs=15,
                 skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
        self.num_last_epochs = num_last_epochs
        self.skip_type_keys = skip_type_keys
        self._restart_dataloader = False

    def before_train_epoch(self, runner):
        """Close mosaic and mixup augmentation and switches to use L1 loss."""
        train_loader = runner.data_loader
        model = runner.model
        if is_module_wrapper(model):
            model = model.module

        switch_epoch = runner.max_epochs - self.num_last_epochs
        if runner.epoch + 1 == switch_epoch:
            runner.logger.info('No mosaic and mixup aug now!')
            # The dataset pipeline cannot be updated when persistent_workers
            # is True, so force the dataloader's worker processes to restart
            # (a hack: reset the private initialization flag and iterator).
            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
            if hasattr(train_loader, 'persistent_workers'
                       ) and train_loader.persistent_workers is True:
                train_loader._DataLoader__initialized = False
                train_loader._iterator = None
                self._restart_dataloader = True
            runner.logger.info('Add additional L1 loss now!')
            model.bbox_head.use_l1 = True
        elif self._restart_dataloader:
            # Once the restart is complete, restore the initialization flag.
            train_loader._DataLoader__initialized = True
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor. Here we need to split the tensor into original representations.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of \
            list (length = poly num) of numpy array.
    """
    mask_polys_list = []
    for img_id in range(len(polys)):
        lens_single = poly_lens[img_id].tolist()
        per_mask_single = polys_per_mask[img_id].tolist()
        # First cut the flat buffer into individual polys, then group the
        # polys into masks.
        split_polys = mmcv.slice_list(polys[img_id], lens_single)
        mask_polys_list.append(mmcv.slice_list(split_polys, per_mask_single))
    return mask_polys_list


# TODO: move this function to more proper place
def encode_mask_results(mask_results):
    """Encode bitmap mask to RLE code.

    Args:
        mask_results (list | tuple[list]): bitmap mask results.
            In mask scoring rcnn, mask_results is a tuple of (segm_results,
            segm_cls_score).

    Returns:
        list | tuple: RLE encoded mask.
    """
    if isinstance(mask_results, tuple):  # mask scoring
        cls_segms, cls_mask_scores = mask_results
    else:
        cls_segms = mask_results
    num_classes = len(cls_segms)
    encoded_mask_results = [[] for _ in range(num_classes)]
    for cls_id in range(num_classes):
        for cls_segm in cls_segms[cls_id]:
            # pycocotools expects a Fortran-ordered HxWx1 uint8 array.
            rle = mask_util.encode(
                np.array(
                    cls_segm[:, :, np.newaxis], order='F',
                    dtype='uint8'))[0]
            encoded_mask_results[cls_id].append(rle)
    if isinstance(mask_results, tuple):
        return encoded_mask_results, cls_mask_scores
    return encoded_mask_results
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
    """YOLOX learning rate scheme.

    There are two main differences between YOLOXLrUpdaterHook
    and CosineAnnealingLrUpdaterHook.

    1. When the current running epoch is greater than
       `max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different with LrUpdaterHook in MMCV

    Args:
        num_last_epochs (int): The number of epochs with a fixed learning
            rate before the end of the training.
    """

    def __init__(self, num_last_epochs, **kwargs):
        self.num_last_epochs = num_last_epochs
        super(YOLOXLrUpdaterHook, self).__init__(**kwargs)

    def get_warmup_lr(self, cur_iters):

        def _get_warmup_lr(cur_iters, regular_lr):
            # Exponential warmup: scale factor grows with the square of the
            # warmup progress.
            k = self.warmup_ratio * pow(
                (cur_iters + 1) / float(self.warmup_iters), 2)
            return [_lr * k for _lr in regular_lr]

        if isinstance(self.base_lr, dict):
            return {
                key: _get_warmup_lr(cur_iters, base_lr)
                for key, base_lr in self.base_lr.items()
            }
        return _get_warmup_lr(cur_iters, self.base_lr)

    def get_lr(self, runner, base_lr):
        # Iterations covered by the fixed-lr tail of the schedule.
        last_iter = len(runner.data_loader) * self.num_last_epochs

        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        progress += 1

        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr

        if progress >= max_progress - last_iter:
            # Fixed learning rate stage.
            return target_lr
        # Cosine annealing between warmup end and the fixed-lr stage.
        return annealing_cos(
            base_lr, target_lr, (progress - self.warmup_iters) /
            (max_progress - self.warmup_iters - last_iter))
"""Dataset settings: CocoCPDataset on DOTA 512x512 crops, P2B pseudo boxes."""
# dataset settings
dataset_type = 'CocoCPDataset'
data_root = '/media/h/H/DOTA10_512_128/'
# data_root = '/media/h/H/ISAID_512_128/'

# ImageNet statistics; images are converted to RGB before normalization.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

data = dict(
    samples_per_gpu=8,
    workers_per_gpu=0,
    train=dict(
        type=dataset_type,
        # Training uses the P2B-generated pseudo boxes; the coarse
        # segmentation annotations are kept below for reference.
        ann_file='/media/h/M/P2B/1dota/P2B_work_dirs/work_dirs/P2B_DOTA_1024_0.0005_stage2_basescales0_bs2/DOTA_train_512.json',
        # ann_file=data_root + 'annotations/DOTA_train_512_coarse_seg.json',
        img_prefix=data_root + 'train/images/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DOTA_train_512_coarse_seg.json',
        img_prefix=data_root + 'train/images/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DOTA_train_512_coarse_seg.json',
        img_prefix=data_root + 'train/images/',
        pipeline=test_pipeline))

evaluation = dict(interval=12, metric=['bbox'])
@DETECTORS.register_module()
class WeakRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN `_"""

    def __init__(self,
                 backbone,
                 neck,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 pretrained=None):
        super(WeakRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

    def forward_train(self,
                      img,
                      img_metas,
                      gt_labels,
                      proposals=None,
                      **kwargs):
        # Weakly-supervised training: only image-level labels plus
        # externally supplied proposals reach the roi head.
        feats = self.extract_feat(img)
        losses = dict()
        roi_losses = self.roi_head.forward_train(feats, img_metas, proposals,
                                                 gt_labels, **kwargs)
        losses.update(roi_losses)
        return losses

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        # Validate the list-typed arguments before dispatching.
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) '
                             f'!= num of image meta ({len(img_metas)})')

        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0],
                                    **kwargs)
        assert imgs[0].size(0) == 1, 'aug test does not support ' \
                                     'inference with batch size ' \
                                     f'{imgs[0].size(0)}'
        return self.aug_test(imgs, img_metas, proposals, **kwargs)

    def aug_test(self, imgs, img_metas, proposal_list, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        feats = self.extract_feats(imgs)
        return self.roi_head.aug_test(
            feats, proposal_list, img_metas, rescale=rescale)
def bbox_overlaps(bboxes1,
                  bboxes2,
                  mode='iou',
                  eps=1e-6,
                  use_legacy_coordinate=False):
    """Calculate the ious between each bbox of bboxes1 and bboxes2.

    Args:
        bboxes1 (ndarray): Shape (n, 4)
        bboxes2 (ndarray): Shape (k, 4)
        mode (str): IOU (intersection over union) or IOF (intersection
            over foreground)
        eps (float): Lower bound on the union area, guards against
            division by zero.
        use_legacy_coordinate (bool): Whether to use coordinate system in
            mmdet v1.x, where width/height are computed as
            `x2 - x1 + 1` and `y2 - y1 + 1`. Default: False.

    Returns:
        ious (ndarray): Shape (n, k)
    """
    assert mode in ['iou', 'iof']
    extra_length = 1. if use_legacy_coordinate else 0.

    boxes_a = bboxes1.astype(np.float32)
    boxes_b = bboxes2.astype(np.float32)
    rows, cols = boxes_a.shape[0], boxes_b.shape[0]
    ious = np.zeros((rows, cols), dtype=np.float32)
    if rows * cols == 0:
        return ious

    # Iterate over the smaller set; transpose back at the end if swapped.
    swapped = False
    if rows > cols:
        boxes_a, boxes_b = boxes_b, boxes_a
        ious = np.zeros((cols, rows), dtype=np.float32)
        swapped = True

    area_a = (boxes_a[:, 2] - boxes_a[:, 0] + extra_length) * (
        boxes_a[:, 3] - boxes_a[:, 1] + extra_length)
    area_b = (boxes_b[:, 2] - boxes_b[:, 0] + extra_length) * (
        boxes_b[:, 3] - boxes_b[:, 1] + extra_length)

    for idx in range(boxes_a.shape[0]):
        x_lo = np.maximum(boxes_a[idx, 0], boxes_b[:, 0])
        y_lo = np.maximum(boxes_a[idx, 1], boxes_b[:, 1])
        x_hi = np.minimum(boxes_a[idx, 2], boxes_b[:, 2])
        y_hi = np.minimum(boxes_a[idx, 3], boxes_b[:, 3])
        overlap = np.maximum(x_hi - x_lo + extra_length, 0) * np.maximum(
            y_hi - y_lo + extra_length, 0)
        if mode == 'iou':
            union = area_a[idx] + area_b - overlap
        else:
            # IoF: normalize by the foreground (bboxes1) area.
            union = area_a[idx] if not swapped else area_b
        union = np.maximum(union, eps)
        ious[idx, :] = overlap / union

    return ious.T if swapped else ious
def images_to_levels(target, num_levels):
    """Convert targets by image to targets by feature level.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    stacked = torch.stack(target, 0)
    level_targets = []
    begin = 0
    for count in num_levels:
        # Slice out this level's anchors for every image at once.
        level_targets.append(stacked[:, begin:begin + count])
        begin += count
    return level_targets


def anchor_inside_flags(flat_anchors,
                        valid_flags,
                        img_shape,
                        allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        img_shape (tuple(int)): Shape of current image.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0. A negative value disables the border check.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a \
            valid range.
    """
    img_h, img_w = img_shape[:2]
    if allowed_border < 0:
        return valid_flags
    return (valid_flags
            & (flat_anchors[:, 0] >= -allowed_border)
            & (flat_anchors[:, 1] >= -allowed_border)
            & (flat_anchors[:, 2] < img_w + allowed_border)
            & (flat_anchors[:, 3] < img_h + allowed_border))


def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    inv = 1 - ratio
    x1 = torch.round(inv * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round(inv * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + inv * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + inv * bbox[3]).long()
    if featmap_size is not None:
        # Clip to the feature map boundary.
        x1 = x1.clamp(min=0, max=featmap_size[1])
        y1 = y1.clamp(min=0, max=featmap_size[0])
        x2 = x2.clamp(min=0, max=featmap_size[1])
        y2 = y2.clamp(min=0, max=featmap_size[0])
    return (x1, y1, x2, y2)
ceil_mode=True) 23 | self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True) 24 | self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True) 25 | self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True) 26 | self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2, bias=True) 27 | self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2, bias=True) 28 | self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=2, dilation=2, bias=True) 29 | 30 | self.conv1_1.requires_grad = False 31 | self.conv1_2.requires_grad = False 32 | self.conv2_1.requires_grad = False 33 | self.conv2_2.requires_grad = False 34 | 35 | self.init_cfg = init_cfg 36 | self.pretrained =pretrained 37 | def forward(self, x): 38 | x = F.relu(self.conv1_1(x), inplace=True) 39 | x = F.relu(self.conv1_2(x), inplace=True) 40 | x = self.pool1(x) 41 | x = F.relu(self.conv2_1(x), inplace=True) 42 | x = F.relu(self.conv2_2(x), inplace=True) 43 | x = self.pool2(x) 44 | x = F.relu(self.conv3_1(x), inplace=True) 45 | x = F.relu(self.conv3_2(x), inplace=True) 46 | x = F.relu(self.conv3_3(x), inplace=True) 47 | x = self.pool3(x) 48 | x = F.relu(self.conv4_1(x), inplace=True) 49 | x = F.relu(self.conv4_2(x), inplace=True) 50 | x = F.relu(self.conv4_3(x), inplace=True) 51 | x = F.relu(self.conv5_1(x), inplace=True) 52 | x = F.relu(self.conv5_2(x), inplace=True) 53 | x = F.relu(self.conv5_3(x), inplace=True) 54 | return [x] 55 | 56 | def init_weights(self): 57 | # # super(VGG16, self).init_weights() 58 | pass 59 | -------------------------------------------------------------------------------- /PLUG-Det/configs_wsod/wsod2_vgg16.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | # model settings 3 | model = dict( 4 | type='WeakRCNN', 5 | pretrained=None, 6 | backbone=dict(type='VGG16'), 7 | neck=None, 8 | roi_head=dict( 9 | 
        type='WSOD2RoIHead',
        steps=40000,
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIPool', output_size=7),
            out_channels=512,
            featmap_strides=[8]),
        bbox_head=dict(
            type='OICRHead',
            in_channels=512,
            hidden_channels=4096,
            roi_feat_size=7,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            num_classes=20))
)
# dataset settings
dataset_type = 'VOCSSDataset'
data_root = '/datavoc/VOCdevkit/'
# Caffe-style pixel means, unit std; to_rgb=False keeps BGR channel order.
img_norm_cfg = dict(
    mean=[104., 117., 124.], std=[1., 1., 1.], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadSuperPixelFromFile'),
    dict(type='LoadWeakAnnotations'),
    dict(type='LoadProposals'),
    # Multi-scale training: the short side is picked from the listed values.
    dict(type='Resize', img_scale=[(488, 2000), (576, 2000), (688, 2000), (864, 2000), (1200, 2000)], keep_ratio=True, multiscale_mode='value'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_labels', 'proposals', 'ss']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(688, 2000),
        #img_scale=[(500, 2000), (600, 2000), (700, 2000), (800, 2000), (900, 2000)],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/trainval.txt',
        img_prefix=data_root + 'VOC2007/',
        # Precomputed selective-search proposals (weakly supervised setting).
        proposal_file='/datavoc/selective_search_data/voc_2007_trainval.pkl',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        proposal_file='/datavoc/selective_search_data/voc_2007_test.pkl',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        proposal_file='/datavoc/selective_search_data/voc_2007_test.pkl',
        pipeline=test_pipeline))

work_dir = '/media/h/M/P2B/1dota/WSOD/wsod2_vgg16/'
# ---------------------------------------------------------------------------
# PLUG-Det/data_process/bar_chart.py
# Plot mean-IoU-per-object-count bars from per-directory results.bbox.json
# files (analysis script, writes a matplotlib figure).
# ---------------------------------------------------------------------------
import xlrd  # import libraries
import matplotlib.pyplot as plt
import numpy as np
import mpl_toolkits.axisartist as axisartist
# NOTE(review): numpy and matplotlib.pyplot are imported twice above/below,
# and xlrd/axisartist appear unused — consider pruning.
import numpy as np
import matplotlib.pyplot as plt
import glob
from natsort import natsorted
import mmcv

# Collect one results.bbox.json per object-count directory, sorted naturally.
num_json_list = glob.glob('/media/h/M/P2B/1dota/nofuse_r0/num_ori/*/results.bbox.json')
num_json_list = natsorted(num_json_list)
all_num_mean = []
all_num_max = []
all_num_min = []
num_dict = dict()
for num_json in num_json_list:
    num_bbox_all = mmcv.load(num_json)
    num_scores_all = [bbox['score'] for bbox in num_bbox_all ]
    num_scores_all = np.array(num_scores_all)
    obj_num = num_scores_all.shape[0]
    # Keyed by the object count parsed from the parent directory name.
    num_dict[int(num_json.split('/')[-2])] = dict()
    num_dict[int(num_json.split('/')[-2])]['num'] = obj_num
    num_dict[int(num_json.split('/')[-2])]['scores'] = num_scores_all
# Bucket object counts into ranges of width 40 (counts observed up to 279).
range_index = 40
max_index = np.ceil(279/range_index)
plot_x_list = []
plot_y_list = []
for i in np.arange (max_index):
    min_ = int(i * range_index + 1)
    max_ = int((i+1) * range_index)
    num_temp = 0
    score_temp = []
    for j in np.arange(min_, max_+1):
        if j in num_dict.keys():
            num_temp += num_dict[j]['num']
            score_temp.append(num_dict[j]['scores'])
    # plot_x_list.append(str(min_)+'-'+str(max_))
    plot_x_list.append(str(max_))
    if num_temp == 0:
        plot_y_list.append(0)
    else:
        # Mean score over every detection falling in this count bucket.
        plot_y_list.append(sum([sum(score_temp_) for score_temp_ in score_temp])/num_temp)

font = {'family': 'Calibri',
        'size': 30,
        }
# grouped bar chart
font_size = 24
fig, ax = plt.subplots(figsize=(10,7))
x_width = np.arange(0,(len(plot_x_list)+1)*20,20).tolist()
label_width = [i for i in x_width]
plot_y_list_x = [x_width_ +2 for x_width_ in x_width[:-1]]
ax.bar(plot_y_list_x, plot_y_list, color = 'pink', width = 16, align = 'edge', edgecolor = 'black', linewidth = 0.5, label = 'w/o SGA module')
xticks = ax.set_xticks(label_width)
xticks = ax.set_xticklabels([str(0)]+plot_x_list, fontsize = font_size)
yticks = ax.set_yticklabels([0.3, 0.35,0.4, 0.45, 0.5, 0.525],fontsize = font_size)
ax.set_ylim(0.3, 0.525)
# Annotate each non-zero bar with its rounded value.
for i in range(len(plot_y_list)):
    xy1 = ([i+10 for i in x_width][i], plot_y_list[i]+0.005)
    if round(plot_y_list[i],3) !=0:
        text1 = str(round(plot_y_list[i],3))
        ax.annotate(text1, xy1, fontsize=font_size, color='black', ha = 'center', va = 'baseline')
ax.set_xlabel('numbers', fontdict = font, )
ax.set_ylabel('mean IoU', fontdict = font,)
manager = plt.get_current_fig_manager()
manager.window.showMaximized() # QT backend
# manager.resize(*manager.window.maxsize()) # TKAgg backend
# manager.frame.Maximize(True)# WX backend
# Reference star marking the with-SGA baseline value.
plt.scatter(1,0.520,s=300,color='b',marker='*', )
plt.annotate('0.520', (3.7, 0.516), fontsize=font_size, color='blue')
plt.show()
# fig.savefig('/media/h/M/P2B/1dota/SGA.eps', dpi = 300, bbox_inches = 'tight')
# ---------------------------------------------------------------------------
# PLUG-Det/mmdet/models/losses/accuracy.py
# ---------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn


@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy of class predictions against integer targets.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class).
        target (torch.Tensor): The target of each prediction, shape (N, ).
        topk (int | tuple[int], optional): A prediction counts as correct
            when the target class appears among its top-k scores.
            Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: A single accuracy when ``topk`` is an int,
            otherwise a tuple with one accuracy per entry of ``topk``.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )

    maxk = max(topk)
    if pred.size(0) == 0:
        # Empty batch: report zero accuracy for every requested k.
        zeros = [pred.new_tensor(0.) for _ in topk]
        return zeros[0] if return_single else zeros
    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    top_scores, top_labels = pred.topk(maxk, dim=1)
    top_labels = top_labels.t()  # transpose to shape (maxk, N)
    hits = top_labels.eq(target.view(1, -1).expand_as(top_labels))
    if thresh is not None:
        # Only prediction values larger than thresh are counted as correct.
        hits = hits & (top_scores > thresh).t()
    accuracies = []
    for k in topk:
        hit_count = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        accuracies.append(hit_count.mul_(100.0 / pred.size(0)))
    return accuracies[0] if return_single else accuracies


class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple, optional): The criterion used to calculate the
            accuracy. Defaults to (1,).
        thresh (float, optional): If not None, predictions with scores
            under this threshold are considered incorrect. Default to None.
    """

    def __init__(self, topk=(1, ), thresh=None):
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Return the accuracies of ``pred`` w.r.t. ``target`` under the
        configured ``topk``/``thresh`` criteria."""
        return accuracy(pred, target, self.topk, self.thresh)
import random
import warnings

import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist


@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
    """Change and synchronize the random image size across ranks.
    SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve
    similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),
    (832, 832)], multiscale_mode='range', keep_ratio=True)`.

    Note: Due to the multi-process dataloader, its behavior is different
    from YOLOX's official implementation, the official is to change the
    size every fixed iteration interval and what we achieved is a fixed
    epoch interval.

    Args:
        ratio_range (tuple[int]): Random ratio range. It will be multiplied
            by 32, and then change the dataset output image size.
            Default: (14, 26).
        img_scale (tuple[int]): Size of input image. Default: (640, 640).
        interval (int): The epoch interval of change image size. Default: 1.
        device (torch.device | str): device for returned tensors.
            Default: 'cuda'.
    """

    def __init__(self,
                 ratio_range=(14, 26),
                 img_scale=(640, 640),
                 interval=1,
                 device='cuda'):
        warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
                      'Please use Resize pipeline to achieve similar '
                      'functions. Due to the multi-process dataloader, '
                      'its behavior is different from YOLOX\'s official '
                      'implementation, the official is to change the size '
                      'every fixed iteration interval and what we achieved '
                      'is a fixed epoch interval.')
        self.rank, world_size = get_dist_info()
        self.is_distributed = world_size > 1
        self.ratio_range = ratio_range
        self.img_scale = img_scale
        self.interval = interval
        self.device = device

    def after_train_epoch(self, runner):
        """Change the dataset output image size."""
        if self.ratio_range is not None and (runner.epoch +
                                             1) % self.interval == 0:
            # Due to DDP and DP get the device behavior inconsistent,
            # so we did not get the device from runner.model.
            # Two longs: the new (h, w) to be shared with all ranks.
            tensor = torch.LongTensor(2).to(self.device)

            if self.rank == 0:
                # Rank 0 draws the new size: a random multiple of 32 for the
                # first dim, the second dim scaled by the configured aspect
                # ratio and snapped to a multiple of 32 as well.
                size_factor = self.img_scale[1] * 1. / self.img_scale[0]
                size = random.randint(*self.ratio_range)
                size = (int(32 * size), 32 * int(size * size_factor))
                tensor[0] = size[0]
                tensor[1] = size[1]

            if self.is_distributed:
                # Broadcast rank 0's pick so every worker uses the same
                # resolution next epoch.
                dist.barrier()
                dist.broadcast(tensor, 0)

            runner.data_loader.dataset.update_dynamic_scale(
                (tensor[0].item(), tensor[1].item()))
# ---------------------------------------------------------------------------
# PLUG-Det/configs_wsod/wsddn_vgg16.py
# WSDDN weak-detection config on DOTA 512 crops (VGG16 backbone).
# ---------------------------------------------------------------------------
_base_ = './base.py'
# model settings
dataset_type = 'CocoCPDataset'
data_root = '/media/h/H/DOTA10_512_128/'
# ImageNet RGB statistics (to_rgb=True converts from BGR on load).
img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadWeakAnnotations'),
    dict(type='LoadProposals'),
    dict(type='Resize', img_scale=(512, 512)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_labels', 'proposals']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        #img_scale=[(500, 2000), (600, 2000), (700, 2000), (800, 2000), (900, 2000)],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    samples_per_gpu=8, workers_per_gpu=0,
    train=dict(
        # min_gt_size=2, # add
        type=dataset_type,
        ann_file=data_root + '/annotations/DOTA_train_512_coarse.json',
        # ann_file=data_root + '/annotations/DOTA_train_512_center.json',
        img_prefix=data_root + '/train/images',
        # Precomputed selective-search proposals (weakly supervised setting).
        proposal_file= data_root + '/train/SSW/selective_search.pkl',
        pipeline=train_pipeline
    ),
    val=dict(
        # min_gt_size=2, # add
        type=dataset_type,
        ann_file=data_root + '/annotations/DOTA_train_512_coarse.json',
        img_prefix=data_root + '/train/images',
        proposal_file= data_root + '/train/SSW/selective_search.pkl',
        pipeline=test_pipeline,
        # test_mode=False # modified
    ),
    test=dict(
        samples_per_gpu=1,
        type=dataset_type,
        ann_file=data_root + '/annotations/DOTA_train_512_coarse.json',
        # ann_file=data_root + '/annotations/DOTA_train_512_center.json',
        img_prefix=data_root + '/train/images',
        pipeline=test_pipeline
    )
)
model = dict(
    type='WeakRCNN',
    pretrained='/home/h/checkpoints/vgg16_caffe-292e1171.pth',
    backbone=dict(type='VGG16'),
    # backbone=dict(type='VGG16', init_cfg=dict(type='Pretrained', checkpoint='/home/h/checkpoints/vgg16_caffe-292e1171.pth')),
    neck=None,
    roi_head=dict(
        type='WSDDNRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIPool', output_size=7),
            out_channels=512,
            featmap_strides=[8]),
        bbox_head=dict(
            type='WSDDNHead',
            in_channels=512,
            hidden_channels=4096,
            roi_feat_size=7,
            num_classes=15)),
)
work_dir = '/media/h/M/P2B/1dota/WSOD/wsddn_vgg16temp/'
# ---------------------------------------------------------------------------
# PLUG-Det/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
# ---------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod

import torch
import torch.nn as nn
from mmcv import ops
from mmcv.runner import BaseModule


class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
    """Base class for RoI extractor.

    Args:
        roi_layer (dict): Specify RoI layer type and arguments.
        out_channels (int): Output channels of RoI layers.
        featmap_strides (int): Strides of input feature maps.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 roi_layer,
                 out_channels,
                 featmap_strides,
                 init_cfg=None):
        super(BaseRoIExtractor, self).__init__(init_cfg)
        # One RoI op per feature level, each scaled to that level's stride.
        self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
        self.out_channels = out_channels
        self.featmap_strides = featmap_strides
        self.fp16_enabled = False

    @property
    def num_inputs(self):
        """int: Number of input feature maps."""
        return len(self.featmap_strides)

    def build_roi_layers(self, layer_cfg, featmap_strides):
        """Build RoI operator to extract feature from each level feature map.

        Args:
            layer_cfg (dict): Dictionary to construct and config RoI layer
                operation. Options are modules under ``mmcv/ops`` such as
                ``RoIAlign``.
            featmap_strides (List[int]): The stride of input feature map w.r.t
                to the original image size, which would be used to scale RoI
                coordinate (original image coordinate system) to feature
                coordinate system.

        Returns:
            nn.ModuleList: The RoI extractor modules for each level feature
                map.
        """

        cfg = layer_cfg.copy()
        # 'type' names the op class in mmcv.ops; remaining keys are kwargs.
        layer_type = cfg.pop('type')
        assert hasattr(ops, layer_type)
        layer_cls = getattr(ops, layer_type)
        roi_layers = nn.ModuleList(
            [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
        return roi_layers

    def roi_rescale(self, rois, scale_factor):
        """Scale RoI coordinates by scale factor.

        Args:
            rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
            scale_factor (float): Scale factor that RoI will be multiplied by.

        Returns:
            torch.Tensor: Scaled RoI.
        """

        # rois[:, 0] is the batch index; columns 1-4 are (x1, y1, x2, y2).
        # Rescale width/height around the unchanged box center.
        cx = (rois[:, 1] + rois[:, 3]) * 0.5
        cy = (rois[:, 2] + rois[:, 4]) * 0.5
        w = rois[:, 3] - rois[:, 1]
        h = rois[:, 4] - rois[:, 2]
        new_w = w * scale_factor
        new_h = h * scale_factor
        x1 = cx - new_w * 0.5
        x2 = cx + new_w * 0.5
        y1 = cy - new_h * 0.5
        y2 = cy + new_h * 0.5
        new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
        return new_rois

    @abstractmethod
    def forward(self, feats, rois, roi_scale_factor=None):
        # Subclasses implement the actual per-level feature extraction.
        pass
# ---------------------------------------------------------------------------
# PLUG-Det/mmdet/core/bbox/samplers/random_sampler.py
# ---------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler


@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Random sampler.
    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        # Imported lazily to avoid a circular import with mmdet.core.bbox.
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        # Optional deterministic RNG, supplied via an ``rng=...`` kwarg.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Random select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num

        is_tensor = isinstance(gallery, torch.Tensor)
        if not is_tensor:
            # Convert list/ndarray input to a tensor on the current device.
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        # This is a temporary fix. We can revert the following code
        # when PyTorch fixes the abnormal return of torch.randperm.
        # See: https://github.com/open-mmlab/mmdetection/pull/5014
        perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
        rand_inds = gallery[perm]
        if not is_tensor:
            # Mirror the input container type in the output.
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        # gt_inds > 0 marks boxes assigned to some ground-truth instance.
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            # Fewer positives than requested: keep them all.
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        # gt_inds == 0 marks boxes assigned to background.
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
# ---------------------------------------------------------------------------
# PLUG-Det/tools/analysis_tools/get_flops.py
# ---------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import argparse

import numpy as np
import torch
from mmcv import Config, DictAction

from mmdet.models import build_detector

try:
    from mmcv.cnn import get_model_complexity_info
except ImportError:
    raise ImportError('Please upgrade mmcv to >0.6.2')


def parse_args():
    """Parse command-line arguments for the FLOPs/params counter."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('--config', default='/media/h/M/P2B/1dota/fuse_r0/cascade_CPR_r50_FPN_1x_DOTA_512_trm.py',help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[800, 800],
        help='input image size')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--size-divisor',
        type=int,
        default=32,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    args = parser.parse_args()
    return args


def main():
    """Build the detector from config and print its FLOPs and params."""

    args = parse_args()

    if len(args.shape) == 1:
        h = w = args.shape[0]
    elif len(args.shape) == 2:
        h, w = args.shape
    else:
        raise ValueError('invalid input shape')
    orig_shape = (3, h, w)
    divisor = args.size_divisor
    if divisor > 0:
        # Round the input up to a multiple of ``size_divisor``, mirroring
        # the Pad transform applied at train/test time.
        h = int(np.ceil(h / divisor)) * divisor
        w = int(np.ceil(w / divisor)) * divisor

    input_shape = (3, h, w)

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    if hasattr(model, 'forward_dummy'):
        # forward_dummy is a plain tensor-in/tensor-out forward pass that the
        # complexity counter can trace without the full data pipeline.
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not currently supported with {}'.
            format(model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30

    if divisor > 0 and \
            input_shape != orig_shape:
        print(f'{split_line}\nUse size divisor set input shape '
              f'from {orig_shape} to {input_shape}\n')
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')


if __name__ == '__main__':
    main()
# ---------------------------------------------------------------------------
# PLUG-Det/mmdet/models/losses/utils.py
# ---------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import functools

import mmcv
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # Map the reduction string onto PyTorch's internal enum.
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
40 | """ 41 | # if weight is specified, apply element-wise weight 42 | if weight is not None: 43 | loss = loss * weight 44 | 45 | # if avg_factor is not specified, just reduce the loss 46 | if avg_factor is None: 47 | loss = reduce_loss(loss, reduction) 48 | else: 49 | # if reduction is mean, then average the loss by avg_factor 50 | if reduction == 'mean': 51 | loss = loss.sum() / avg_factor 52 | # if reduction is 'none', then do nothing, otherwise raise an error 53 | elif reduction != 'none': 54 | raise ValueError('avg_factor can not be used with reduction="sum"') 55 | return loss 56 | 57 | 58 | def weighted_loss(loss_func): 59 | """Create a weighted version of a given loss function. 60 | 61 | To use this decorator, the loss function must have the signature like 62 | `loss_func(pred, target, **kwargs)`. The function only needs to compute 63 | element-wise loss without any reduction. This decorator will add weight 64 | and reduction arguments to the function. The decorated function will have 65 | the signature like `loss_func(pred, target, weight=None, reduction='mean', 66 | avg_factor=None, **kwargs)`. 67 | 68 | :Example: 69 | 70 | >>> import torch 71 | >>> @weighted_loss 72 | >>> def l1_loss(pred, target): 73 | >>> return (pred - target).abs() 74 | 75 | >>> pred = torch.Tensor([0, 2, 3]) 76 | >>> target = torch.Tensor([1, 1, 1]) 77 | >>> weight = torch.Tensor([1, 0, 1]) 78 | 79 | >>> l1_loss(pred, target) 80 | tensor(1.3333) 81 | >>> l1_loss(pred, target, weight) 82 | tensor(1.) 
83 | >>> l1_loss(pred, target, reduction='none') 84 | tensor([1., 1., 2.]) 85 | >>> l1_loss(pred, target, weight, avg_factor=2) 86 | tensor(1.5000) 87 | """ 88 | 89 | @functools.wraps(loss_func) 90 | def wrapper(pred, 91 | target, 92 | weight=None, 93 | reduction='mean', 94 | avg_factor=None, 95 | **kwargs): 96 | # get element-wise loss 97 | loss = loss_func(pred, target, **kwargs) 98 | loss = weight_reduce_loss(loss, weight, reduction, avg_factor) 99 | return loss 100 | 101 | return wrapper 102 | -------------------------------------------------------------------------------- /PLUG-Det/tools/analysis_tools/eval_metric.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import argparse 3 | 4 | import mmcv 5 | from mmcv import Config, DictAction 6 | 7 | from mmdet.datasets import build_dataset 8 | 9 | 10 | def parse_args(): 11 | parser = argparse.ArgumentParser(description='Evaluate metric of the ' 12 | 'results saved in pkl format') 13 | parser.add_argument('--config', default = '/media/h/M/P2B/1dota/fuse_r0/seg/0.5/fasterrcnn_r50_1x/faster_rcnn_r50_fpn_1x_dota.py', help='Config of the model') 14 | parser.add_argument('--pkl_results', default = '/media/h/M/P2B/1dota/fuse_r0/seg/0.5/fasterrcnn_r50_1x/val/result.pkl', help='Results in pickle format') 15 | parser.add_argument( 16 | '--format-only', 17 | action='store_true', 18 | help='Format the output results without perform evaluation. 
It is' 19 | 'useful when you want to format the result to a specific format and ' 20 | 'submit it to the test server') 21 | parser.add_argument( 22 | '--eval', 23 | type=str, 24 | nargs='+', 25 | default=['bbox', 'segm'], 26 | help='Evaluation metrics, which depends on the dataset, e.g., "bbox",' 27 | ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') 28 | parser.add_argument( 29 | '--cfg-options', 30 | nargs='+', 31 | action=DictAction, 32 | help='override some settings in the used config, the key-value pair ' 33 | 'in xxx=yyy format will be merged into config file. If the value to ' 34 | 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 35 | 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 36 | 'Note that the quotation marks are necessary and that no white space ' 37 | 'is allowed.') 38 | parser.add_argument( 39 | '--eval-options', 40 | nargs='+', 41 | action=DictAction, 42 | default={ 43 | # 'jsonfile_prefix':'/media/h/M/P2B/1dota/fuse_r0/results_val', 44 | 'classwise':True, 45 | 'iou_thrs':[0.5],}, 46 | help='custom options for evaluation, the key-value pair in xxx=yyy ' 47 | 'format will be kwargs for dataset.evaluate() function') 48 | args = parser.parse_args() 49 | return args 50 | 51 | 52 | def main(): 53 | args = parse_args() 54 | 55 | cfg = Config.fromfile(args.config) 56 | assert args.eval or args.format_only, ( 57 | 'Please specify at least one operation (eval/format the results) with ' 58 | 'the argument "--eval", "--format-only"') 59 | if args.eval and args.format_only: 60 | raise ValueError('--eval and --format_only cannot be both specified') 61 | 62 | if args.cfg_options is not None: 63 | cfg.merge_from_dict(args.cfg_options) 64 | cfg.data.test.test_mode = True 65 | 66 | dataset = build_dataset(cfg.data.test) 67 | outputs = mmcv.load(args.pkl_results) 68 | 69 | kwargs = {} if args.eval_options is None else args.eval_options 70 | if args.format_only: 71 | dataset.format_results(outputs, 
**kwargs) 72 | if args.eval: 73 | eval_kwargs = cfg.get('evaluation', {}).copy() 74 | # hard-code way to remove EvalHook args 75 | for key in [ 76 | 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 77 | 'rule' 78 | ]: 79 | eval_kwargs.pop(key, None) 80 | eval_kwargs.update(dict(metric=args.eval, **kwargs)) 81 | print(dataset.evaluate(outputs, **eval_kwargs)) 82 | 83 | 84 | if __name__ == '__main__': 85 | main() 86 | -------------------------------------------------------------------------------- /PLUG-Det/configs_wsod/base.py: -------------------------------------------------------------------------------- 1 | # model training and testing settings 2 | train_cfg = dict( 3 | rcnn=dict()) 4 | test_cfg = dict( 5 | rcnn=dict( 6 | score_thr=0.0000, 7 | nms=dict(type='nms', iou_threshold=0.3), 8 | max_per_img=100)) 9 | 10 | # dataset settings 11 | dataset_type = 'COCOCPDataset' 12 | data_root = '/media/h/H/DOTA10_512_128/' 13 | img_norm_cfg = dict( 14 | mean=[104., 117., 124.], std=[1., 1., 1.], to_rgb=False) 15 | train_pipeline = [ 16 | dict(type='LoadImageFromFile'), 17 | dict(type='LoadWeakAnnotations'), 18 | dict(type='LoadProposals'), 19 | dict(type='Resize', img_scale=[(488, 2000), (576, 2000), (688, 2000), (864, 2000), (1200, 2000)], keep_ratio=True, multiscale_mode='range'), 20 | dict(type='RandomFlip', flip_ratio=0.5), 21 | dict(type='Normalize', **img_norm_cfg), 22 | dict(type='Pad', size_divisor=32), 23 | dict(type='DefaultFormatBundle'), 24 | dict(type='Collect', keys=['img', 'gt_labels', 'proposals']), 25 | ] 26 | test_pipeline = [ 27 | dict(type='LoadImageFromFile'), 28 | dict(type='LoadProposals'), 29 | dict( 30 | type='MultiScaleFlipAug', 31 | img_scale=(688, 2000), 32 | #img_scale=[(500, 2000), (600, 2000), (700, 2000), (800, 2000), (900, 2000)], 33 | flip=False, 34 | transforms=[ 35 | dict(type='Resize', keep_ratio=True), 36 | dict(type='RandomFlip'), 37 | dict(type='Normalize', **img_norm_cfg), 38 | dict(type='Pad', size_divisor=32), 39 | 
dict(type='ImageToTensor', keys=['img']), 40 | dict(type='Collect', keys=['img', 'proposals']), 41 | ]) 42 | ] 43 | # data = dict( 44 | # samples_per_gpu=1, 45 | # workers_per_gpu=2, 46 | # train=dict( 47 | # type=dataset_type, 48 | # ann_file=data_root + '/annotations/train.txt', 49 | # img_prefix=data_root + '/train/images/', 50 | # proposal_file='/train/SSW/selective_search.pkl', 51 | # pipeline=train_pipeline), 52 | # val=dict( 53 | # type=dataset_type, 54 | # ann_file=data_root + '/annotations/train.txt', 55 | # img_prefix=data_root + '/train/images/', 56 | # proposal_file='/train/SSW/selective_search.pkl', 57 | # pipeline=test_pipeline), 58 | # test=dict( 59 | # type=dataset_type, 60 | # ann_file=data_root + '/annotations/train.txt', 61 | # img_prefix=data_root + 'VOC2007/', 62 | # proposal_file='/train/SSW/selective_search.pkl', 63 | # pipeline=test_pipeline)) 64 | evaluation = dict(interval=100, metric='mAP') 65 | 66 | # optimizer 67 | optimizer = dict( 68 | type='Adam', 69 | lr=1e-5, 70 | weight_decay=0.0005, 71 | paramwise_cfg=dict( 72 | bias_decay_mult=0., 73 | bias_lr_mult=2., 74 | custom_keys={ 75 | 'refine': dict(lr_mult=10), 76 | }) 77 | ) 78 | 79 | optimizer_config = dict(grad_clip=None) 80 | # learning policy 81 | lr_config = dict( 82 | policy='step', 83 | warmup='linear', 84 | warmup_iters=500, 85 | warmup_ratio=0.001, 86 | step=[36]) 87 | total_epochs = 64 88 | 89 | checkpoint_config = dict(interval=12) 90 | # yapf:disable 91 | log_config = dict( 92 | interval=100, 93 | hooks=[ 94 | dict(type='TextLoggerHook'), 95 | dict(type='TensorboardLoggerHook') 96 | ]) 97 | # yapf:enable 98 | dist_params = dict(backend='nccl') 99 | log_level = 'INFO' 100 | load_from = None #'pretrain/vgg16_v2.pth' 101 | resume_from = None 102 | workflow = [('train', 1)] 103 | -------------------------------------------------------------------------------- /PLUG-Det/data_process/DOTA_generate_single_point_annotation.py: 
import json

import numpy as np


def main():
    """Fuse iSAID point/mask annotations into the DOTA annotation json.

    For every DOTA annotation, the iSAID annotation of the same image whose
    bbox best overlaps it (IoU >= 0.1) donates its single-point label,
    pseudo box, bbox, area and segmentation; the fused annotations are then
    dumped to ``dump_file``.  Annotations without a sufficiently overlapping
    iSAID counterpart are dropped (the output requires segmentation info).
    """
    # Heavy third-party deps are only needed when the script actually runs,
    # so keep them out of module import time.
    from pycocotools.coco import COCO
    from tqdm import tqdm

    dota_json_file = '/media/h/H/DOTA10_512_128/annotations/DOTA_train_512.json'
    isaid_json_file = '/media/h/H/ISAID_512_128/annotations/iSAID_train_512_coarse.json'
    dump_file = '/media/h/H/DOTA10_512_128/annotations/DOTA_train_512_coarse_seg.json'

    with open(dota_json_file, 'r') as f:
        dota_json_info = json.load(f)
    isaid_coco = COCO(isaid_json_file)
    dota_coco = COCO(dota_json_file)

    # Build the file_name -> iSAID image id index once, instead of scanning
    # every iSAID image for every DOTA image (the original O(n*m) loop also
    # leaked the previous image's `isaid_anns` when no file name matched).
    isaid_name2id = {
        img['file_name']: img_id for img_id, img in isaid_coco.imgs.items()
    }

    new_annotations = []
    ann_id = 0  # ids are re-numbered contiguously for the fused file
    for img_id in tqdm(dota_coco.getImgIds()):
        dota_anns = dota_coco.imgToAnns[img_id]
        file_name = dota_coco.imgs[img_id]['file_name']
        isaid_img_id = isaid_name2id.get(file_name)
        # If the image is missing from iSAID there is nothing to fuse with.
        isaid_anns = (
            isaid_coco.imgToAnns[isaid_img_id]
            if isaid_img_id is not None else [])
        isaid_bboxes = [ann['bbox'] for ann in isaid_anns]
        for dota_ann in dota_anns:
            dota_box = dota_ann['bbox']
            ious = [calculate_iou(dota_box, b) for b in isaid_bboxes] or [0]
            if max(ious) >= 0.1:
                isaid_ann = isaid_anns[int(np.argmax(ious))]
                # Copy the point supervision from the matched iSAID ann.
                # The original `dict.update('point':...)` calls were a
                # SyntaxError; plain assignment is what was intended (see
                # the commented-out lines that preceded them).
                dota_ann['point'] = isaid_ann['point']
                # NOTE(review): iSAID stores the key as 'pseudo_bbox' while
                # DOTA used 'pseudo_box' — confirm which key downstream
                # readers expect.
                dota_ann['pseudo_box'] = isaid_ann['pseudo_bbox']
                dota_ann['bbox'] = isaid_ann['bbox']
                dota_ann['area'] = isaid_ann['area']
                dota_ann['segmentation'] = isaid_ann['segmentation']
                ann_id += 1
                dota_ann['id'] = ann_id
                new_annotations.append(dota_ann)
    dota_json_info['annotations'] = new_annotations
    with open(dump_file, 'w') as f:
        json.dump(dota_json_info, f, indent=4)


def calculate_iou(box1, box2):
    """Return the IoU of two COCO-style ``[x, y, w, h]`` boxes.

    Areas use the pixel-inclusive ``(w + 1) * (h + 1)`` convention to match
    :func:`get_intersection_area`.
    """
    box1_area = (box1[-2] + 1) * (box1[-1] + 1)
    box2_area = (box2[-2] + 1) * (box2[-1] + 1)
    intersection_area = get_intersection_area(box1, box2)
    union_area = box1_area + box2_area - intersection_area
    return float(intersection_area) / float(union_area)


def get_intersection_area(box1, box2):
    """Return the intersection area of two ``[x, y, w, h]`` boxes.

    :param box1: COCO-style box of the first object.
    :param box2: COCO-style box of the second object.
    :return: float pixel-inclusive intersection area, 0.0 if disjoint.
    """
    x1 = max(box1[0], box2[0])
    x2 = min(box1[0] + box1[2], box2[0] + box2[2])
    y1 = max(box1[1], box2[1])
    y2 = min(box1[1] + box1[3], box2[1] + box2[3])
    # No overlap: either width or height of the intersection is negative.
    if (x2 - x1 < 0) or (y2 - y1 < 0):
        return 0.0
    return (x2 - x1 + 1) * (y2 - y1 + 1)


if __name__ == '__main__':
    main()
17 | gaussian_target (torch.Tensor): The learning target of the prediction 18 | in gaussian distribution. 19 | alpha (float, optional): A balanced form for Focal Loss. 20 | Defaults to 2.0. 21 | gamma (float, optional): The gamma for calculating the modulating 22 | factor. Defaults to 4.0. 23 | """ 24 | eps = 1e-12 25 | pos_weights = gaussian_target.eq(1) 26 | neg_weights = (1 - gaussian_target).pow(gamma) 27 | pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights 28 | neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights 29 | return pos_loss + neg_loss 30 | 31 | 32 | @LOSSES.register_module() 33 | class GaussianFocalLoss(nn.Module): 34 | """GaussianFocalLoss is a variant of focal loss. 35 | 36 | More details can be found in the `paper 37 | `_ 38 | Code is modified from `kp_utils.py 39 | `_ # noqa: E501 40 | Please notice that the target in GaussianFocalLoss is a gaussian heatmap, 41 | not 0/1 binary target. 42 | 43 | Args: 44 | alpha (float): Power of prediction. 45 | gamma (float): Power of target for negative samples. 46 | reduction (str): Options are "none", "mean" and "sum". 47 | loss_weight (float): Loss weight of current loss. 48 | """ 49 | 50 | def __init__(self, 51 | alpha=2.0, 52 | gamma=4.0, 53 | reduction='mean', 54 | loss_weight=1.0): 55 | super(GaussianFocalLoss, self).__init__() 56 | self.alpha = alpha 57 | self.gamma = gamma 58 | self.reduction = reduction 59 | self.loss_weight = loss_weight 60 | 61 | def forward(self, 62 | pred, 63 | target, 64 | weight=None, 65 | avg_factor=None, 66 | reduction_override=None): 67 | """Forward function. 68 | 69 | Args: 70 | pred (torch.Tensor): The prediction. 71 | target (torch.Tensor): The learning target of the prediction 72 | in gaussian distribution. 73 | weight (torch.Tensor, optional): The weight of loss for each 74 | prediction. Defaults to None. 75 | avg_factor (int, optional): Average factor that is used to average 76 | the loss. Defaults to None. 
77 | reduction_override (str, optional): The reduction method used to 78 | override the original reduction method of the loss. 79 | Defaults to None. 80 | """ 81 | assert reduction_override in (None, 'none', 'mean', 'sum') 82 | reduction = ( 83 | reduction_override if reduction_override else self.reduction) 84 | loss_reg = self.loss_weight * gaussian_focal_loss( 85 | pred, 86 | target, 87 | weight, 88 | alpha=self.alpha, 89 | gamma=self.gamma, 90 | reduction=reduction, 91 | avg_factor=avg_factor) 92 | return loss_reg 93 | -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/detectors/PLUG.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.builder import DETECTORS 2 | from mmdet.models.detectors.single_stage import SingleStageDetector 3 | from mmdet.core import bbox2result 4 | import torch.nn.functional as F 5 | import torch.nn as nn 6 | import torch 7 | 8 | @DETECTORS.register_module() 9 | class PLUG(SingleStageDetector): 10 | def __init__(self, 11 | backbone, 12 | neck, 13 | bbox_head, 14 | train_cfg=None, 15 | test_cfg=None, 16 | pretrained=None, 17 | init_cfg=None): 18 | super(PLUG, 19 | self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, 20 | pretrained, init_cfg) 21 | 22 | def forward_train(self, 23 | img, 24 | img_metas, 25 | gt_bboxes, 26 | gt_labels, 27 | gt_bboxes_ignore=None, 28 | gt_pseudo_bboxes=None, 29 | ): 30 | super(SingleStageDetector, self).forward_train(img, img_metas) 31 | x = self.extract_feat(img) 32 | losses = self.bbox_head.forward_train(x, img, img_metas, gt_pseudo_bboxes, 33 | gt_labels) 34 | return losses 35 | 36 | def set_epoch(self, epoch): 37 | self.bbox_head.epoch = epoch 38 | def set_iter(self, iter): 39 | self.bbox_head.iter = iter 40 | def set_inner_iter(self, inner_iter): 41 | self.bbox_head.inner_iter = inner_iter 42 | 43 | def extract_feat(self, img): 44 | """Directly extract features from the backbone+neck.""" 
45 | x = self.backbone(img) 46 | if self.with_neck: 47 | x = self.neck(x) 48 | return x 49 | 50 | def simple_test(self, img, img_metas, rescale=False, gt_pseudo_bboxes=None, gt_labels=None,\ 51 | gt_bboxes_ignore=None, gt_anns_id=None,gt_bboxes=None, gt_masks= None, two_model = None): 52 | """Test function without test-time augmentation. 53 | 54 | Args: 55 | img (torch.Tensor): Images with shape (N, C, H, W). 56 | img_metas (list[dict]): List of image information. 57 | rescale (bool, optional): Whether to rescale the results. 58 | Defaults to False. 59 | 60 | Returns: 61 | list[list[np.ndarray]]: BBox results of each image and classes. 62 | The outer list corresponds to each image. The inner list 63 | corresponds to each class. 64 | """ 65 | feat = self.extract_feat(img) 66 | results_list = self.bbox_head.simple_test( 67 | feat, img, img_metas, rescale=rescale, gt_pseudo_bboxes=gt_pseudo_bboxes, gt_labels=gt_labels,\ 68 | gt_bboxes_ignore=gt_bboxes_ignore, gt_anns_id=gt_anns_id,gt_bboxes=gt_bboxes) 69 | final_results_list = results_list[0] 70 | bbox_results = [ 71 | bbox2result(det_bbox, det_label, self.bbox_head.num_classes) 72 | for det_bbox, det_label in zip(final_results_list[0][0], gt_labels[0]) 73 | ] 74 | mask_results = [ 75 | self.mask2result(det_mask, det_label, self.bbox_head.num_classes) 76 | for det_mask, det_label in zip(final_results_list[1], gt_labels[0]) 77 | ] 78 | return list(zip(bbox_results, mask_results)) 79 | def mask2result(self, maskes, labels, num_classes): 80 | maskes = torch.stack(maskes,0) 81 | out_mask = [maskes[labels == i] for i in range(num_classes)] 82 | out_mask = [list(out_mask[i]) for i in range(num_classes)] 83 | return out_mask -------------------------------------------------------------------------------- /PLUG-Det/mmdet/models/roi_heads/base_roi_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from abc import ABCMeta, abstractmethod 3 | 4 | from mmcv.runner import BaseModule 5 | 6 | from ..builder import build_shared_head 7 | 8 | 9 | class BaseRoIHead(BaseModule, metaclass=ABCMeta): 10 | """Base class for RoIHeads.""" 11 | 12 | def __init__(self, 13 | bbox_roi_extractor=None, 14 | bbox_head=None, 15 | mask_roi_extractor=None, 16 | mask_head=None, 17 | shared_head=None, 18 | train_cfg=None, 19 | test_cfg=None, 20 | pretrained=None, 21 | init_cfg=None): 22 | super(BaseRoIHead, self).__init__(init_cfg) 23 | self.train_cfg = train_cfg 24 | self.test_cfg = test_cfg 25 | if shared_head is not None: 26 | shared_head.pretrained = pretrained 27 | self.shared_head = build_shared_head(shared_head) 28 | 29 | if bbox_head is not None: 30 | self.init_bbox_head(bbox_roi_extractor, bbox_head) 31 | 32 | if mask_head is not None: 33 | self.init_mask_head(mask_roi_extractor, mask_head) 34 | 35 | self.init_assigner_sampler() 36 | 37 | @property 38 | def with_bbox(self): 39 | """bool: whether the RoI head contains a `bbox_head`""" 40 | return hasattr(self, 'bbox_head') and self.bbox_head is not None 41 | 42 | @property 43 | def with_mask(self): 44 | """bool: whether the RoI head contains a `mask_head`""" 45 | return hasattr(self, 'mask_head') and self.mask_head is not None 46 | 47 | @property 48 | def with_shared_head(self): 49 | """bool: whether the RoI head contains a `shared_head`""" 50 | return hasattr(self, 'shared_head') and self.shared_head is not None 51 | 52 | @abstractmethod 53 | def init_bbox_head(self): 54 | """Initialize ``bbox_head``""" 55 | pass 56 | 57 | @abstractmethod 58 | def init_mask_head(self): 59 | """Initialize ``mask_head``""" 60 | pass 61 | 62 | @abstractmethod 63 | def init_assigner_sampler(self): 64 | """Initialize assigner and sampler.""" 65 | pass 66 | 67 | @abstractmethod 68 | def forward_train(self, 69 | x, 70 | img_meta, 71 | proposal_list, 72 | gt_bboxes, 73 | gt_labels, 74 | gt_bboxes_ignore=None, 75 | gt_masks=None, 76 | **kwargs): 77 
| """Forward function during training.""" 78 | 79 | async def async_simple_test(self, 80 | x, 81 | proposal_list, 82 | img_metas, 83 | proposals=None, 84 | rescale=False, 85 | **kwargs): 86 | """Asynchronized test function.""" 87 | raise NotImplementedError 88 | 89 | def simple_test(self, 90 | x, 91 | proposal_list, 92 | img_meta, 93 | proposals=None, 94 | rescale=False, 95 | **kwargs): 96 | """Test without augmentation.""" 97 | 98 | def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): 99 | """Test with augmentations. 100 | 101 | If rescale is False, then returned bboxes and masks will fit the scale 102 | of imgs[0]. 103 | """ 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Learning Remote Sensing Object Detection with Single Point Supervision 2 |
3 | 4 | This is the PyTorch implementation of the method in our paper "*Learning Remote Sensing Object Detection with Single Point Supervision*". 5 | 6 |
7 | 8 |
9 |
10 | ## Preparation:
11 |
12 | #### 1. Requirement:
13 | * [[mmdetection 2.22.0](https://github.com/open-mmlab/mmdetection)]
14 | * [[DOTA_devkit](https://github.com/CAPTAIN-WHU/DOTA_devkit)]
15 | * [[iSAID_devkit](https://github.com/CAPTAIN-WHU/iSAID_Devkit)]
16 | * [[dijkstra](https://github.com/BraveGroup/PSPS)]
17 |
18 | #### 2. Generating data with single point labels:
19 | * First, we use the DOTA_devkit and iSAID_devkit toolboxes to generate cropped images with json annotations.
20 | * Second, we add single point annotations to the json file of the iSAID dataset.
21 | `data_process/iSAID_generate_single_point_annotation.py`
22 | * Third, we utilize the generated iSAID json file to add single point information to the DOTA json file.
23 | `data_process/DOTA_generate_single_point_annotation.py`
24 | ## Model training and validation:
25 | #### 1. Training PLUG:
26 | * Run `train.py` with config `configs_single_point/PLUG_r50_DOTA_512.py` to train PLUG.
27 | #### 2. Running inference and validating PLUG:
28 | * Run `test_PLUG.py` to run inference with PLUG and generate pseudo boxes of training data.
29 | * Run `data_process/calculate_mIoU.py` to output the mIoU results of PLUG.
30 | #### 3. Training Faster-RCNN or Mask-RCNN:
31 | * Run `data_process/bbox2json.py` to generate json of training data with pseudo boxes.
32 | * Run `train.py` with config `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_dota.py` to train Faster-RCNN.
33 | * Run `data_process/segm2json.py` to generate json of training data with pseudo boxes and pseudo masks.
34 | * Run `train.py` with config `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_dota.py` to train Mask-RCNN.
35 | #### 4. Testing Faster-RCNN or Mask-RCNN:
36 | * Run `test.py` to run inference with and evaluate Faster-RCNN and Mask-RCNN.
37 | * Run `DetVisGUI/DetVisGUI.py` to visualize the detection results of different detectors conveniently. ( [[DetVisGUI](https://github.com/Chien-Hung/DetVisGUI)])
38 | #### 5. 
Other methods:
39 | * We retrain P2BNet, WSDDN and OICR in our code based on [[P2BNet](https://github.com/ucas-vg/P2BNet)] and [[WSOD2](https://github.com/researchmm/WSOD2)].
40 | * Run `train.py` with different configs to train the above methods.
41 | #### 6. Other codes:
42 | * We split the training dataset according to the object numbers in images to evaluate the effects of dense objects.
43 | `data_process/split_DOTA_image_and_json.py`
44 | * Run `test_num.py` to generate the pseudo boxes of sub-datasets with different object numbers cyclically.
45 | * Run `data_process/bar_chart.py` to generate the mIoU distribution of images with different object numbers.
46 | ## Our model and data annotations:
47 | * Please download from the [[checkpoints(提取码:eh62)](https://pan.baidu.com/s/1yonTazs25aTLnwIkU_mOMw?pwd=eh62)].
48 |
49 |
50 | ## Citation
51 | **If you find this work helpful, please consider citing:**
52 | ```
53 | @Article{PLUG-Det,
54 | author = {He, Shitian and Zou, Huanxin and Wang, Yingqian and Li, Boyang and Cao, Xu and Jing, Ning},
55 | title = {Learning Remote Sensing Object Detection with Single Point Supervision},
56 | journal = {IEEE TGRS},
57 | year = {2023},
58 | }
59 | ```
60 |
61 | 62 | ## Contact 63 | **Welcome to raise issues or email to heshitian19@nudt.edu.cn for any question regarding this work.** 64 | 71 | -------------------------------------------------------------------------------- /PLUG-Det/tools/misc/download_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from itertools import repeat 3 | from multiprocessing.pool import ThreadPool 4 | from pathlib import Path 5 | from tarfile import TarFile 6 | from zipfile import ZipFile 7 | 8 | import torch 9 | 10 | 11 | def parse_args(): 12 | parser = argparse.ArgumentParser( 13 | description='Download datasets for training') 14 | parser.add_argument( 15 | '--dataset-name', type=str, help='dataset name', default='coco2017') 16 | parser.add_argument( 17 | '--save-dir', 18 | type=str, 19 | help='the dir to save dataset', 20 | default='data/coco') 21 | parser.add_argument( 22 | '--unzip', 23 | action='store_true', 24 | help='whether unzip dataset or not, zipped files will be saved') 25 | parser.add_argument( 26 | '--delete', 27 | action='store_true', 28 | help='delete the download zipped files') 29 | parser.add_argument( 30 | '--threads', type=int, help='number of threading', default=4) 31 | args = parser.parse_args() 32 | return args 33 | 34 | 35 | def download(url, dir, unzip=True, delete=False, threads=1): 36 | 37 | def download_one(url, dir): 38 | f = dir / Path(url).name 39 | if Path(url).is_file(): 40 | Path(url).rename(f) 41 | elif not f.exists(): 42 | print('Downloading {} to {}'.format(url, f)) 43 | torch.hub.download_url_to_file(url, f, progress=True) 44 | if unzip and f.suffix in ('.zip', '.tar'): 45 | print('Unzipping {}'.format(f.name)) 46 | if f.suffix == '.zip': 47 | ZipFile(f).extractall(path=dir) 48 | elif f.suffix == '.tar': 49 | TarFile(f).extractall(path=dir) 50 | if delete: 51 | f.unlink() 52 | print('Delete {}'.format(f)) 53 | 54 | dir = Path(dir) 55 | if threads > 1: 56 | pool = ThreadPool(threads) 57 
| pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) 58 | pool.close() 59 | pool.join() 60 | else: 61 | for u in [url] if isinstance(url, (str, Path)) else url: 62 | download_one(u, dir) 63 | 64 | 65 | def main(): 66 | args = parse_args() 67 | path = Path(args.save_dir) 68 | if not path.exists(): 69 | path.mkdir(parents=True, exist_ok=True) 70 | data2url = dict( 71 | # TODO: Support for downloading Panoptic Segmentation of COCO 72 | coco2017=[ 73 | 'http://images.cocodataset.org/zips/train2017.zip', 74 | 'http://images.cocodataset.org/zips/val2017.zip', 75 | 'http://images.cocodataset.org/zips/test2017.zip', 76 | 'http://images.cocodataset.org/annotations/' + 77 | 'annotations_trainval2017.zip' 78 | ], 79 | lvis=[ 80 | 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa 81 | 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa 82 | ], 83 | voc2007=[ 84 | 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa 85 | 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa 86 | 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa 87 | ], 88 | ) 89 | url = data2url.get(args.dataset_name, None) 90 | if url is None: 91 | print('Only support COCO, VOC, and LVIS now!') 92 | return 93 | download( 94 | url, 95 | dir=path, 96 | unzip=args.unzip, 97 | delete=args.delete, 98 | threads=args.threads) 99 | 100 | 101 | if __name__ == '__main__': 102 | main() 103 | -------------------------------------------------------------------------------- /PLUG-Det/configs_wsod/oicr_res50.py: -------------------------------------------------------------------------------- 1 | _base_ = './base.py' 2 | # model settings 3 | dataset_type = 'CocoCPDataset' 4 | data_root = '/media/h/H/DOTA10_512_128/' 5 | img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 6 | train_pipeline = [ 7 
| dict(type='LoadImageFromFile'), 8 | dict(type='LoadWeakAnnotations'), 9 | dict(type='LoadProposals'), 10 | dict(type='Resize', img_scale=(512, 512)), 11 | dict(type='RandomFlip', flip_ratio=0.5), 12 | dict(type='Normalize', **img_norm_cfg), 13 | dict(type='Pad', size_divisor=32), 14 | dict(type='DefaultFormatBundle'), 15 | dict(type='Collect', keys=['img', 'gt_labels', 'proposals']), 16 | ] 17 | test_pipeline = [ 18 | dict(type='LoadImageFromFile'), 19 | dict(type='LoadProposals'), 20 | dict( 21 | type='MultiScaleFlipAug', 22 | img_scale=(512, 512), 23 | #img_scale=[(500, 2000), (600, 2000), (700, 2000), (800, 2000), (900, 2000)], 24 | flip=False, 25 | transforms=[ 26 | dict(type='Resize', keep_ratio=True), 27 | dict(type='RandomFlip'), 28 | dict(type='Normalize', **img_norm_cfg), 29 | dict(type='Pad', size_divisor=32), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img', 'proposals']), 32 | ]) 33 | ] 34 | data = dict( 35 | samples_per_gpu=1, workers_per_gpu=0, 36 | train=dict( 37 | # min_gt_size=2, # add 38 | type=dataset_type, 39 | ann_file=data_root + '/annotations/DOTA_train_512_coarse.json', 40 | # ann_file=data_root + '/annotations/DOTA_train_512_center.json', 41 | img_prefix=data_root + '/train/images', 42 | proposal_file= data_root + '/train/SSW/selective_search.pkl', 43 | pipeline=train_pipeline 44 | ), 45 | val=dict( 46 | # min_gt_size=2, # add 47 | type=dataset_type, 48 | ann_file=data_root + '/annotations/DOTA_train_512_coarse.json', 49 | img_prefix=data_root + '/train/images', 50 | proposal_file= data_root + '/train/SSW/selective_search.pkl', 51 | pipeline=test_pipeline, 52 | # test_mode=False # modified 53 | ), 54 | test=dict( 55 | samples_per_gpu=1, 56 | type=dataset_type, 57 | ann_file=data_root + '/annotations/DOTA_train_512_coarse.json', 58 | # ann_file=data_root + '/annotations/DOTA_train_512_center.json', 59 | img_prefix=data_root + '/train/images', 60 | pipeline=test_pipeline 61 | ) 62 | ) 63 | norm_cfg = 
dict(type='GN', num_groups=32, requires_grad=True) # add 64 | model = dict( 65 | type='WeakRCNN', 66 | pretrained='torchvision://resnet50', 67 | backbone=dict( 68 | type='ResNet', 69 | depth=50, 70 | num_stages=4, 71 | out_indices=(0, 1, 2, 3), 72 | frozen_stages=1, 73 | norm_cfg=dict(type='BN', requires_grad=True), 74 | norm_eval=True, 75 | style='pytorch', 76 | # init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50') 77 | ), 78 | neck=dict( 79 | type='FPN', 80 | in_channels=[256, 512, 1024, 2048], 81 | out_channels=256, 82 | start_level=1, # 1 83 | add_extra_convs='on_input', 84 | num_outs=1, # 5 85 | # conv_cfg=dict(type='DCNv2'), 86 | norm_cfg=norm_cfg, # add 87 | ), 88 | roi_head=dict( 89 | type='OICRRoIHead', 90 | bbox_roi_extractor=dict( 91 | type='SingleRoIExtractor', 92 | roi_layer=dict(type='RoIPool', output_size=7), 93 | out_channels=512, 94 | featmap_strides=[8]), 95 | bbox_head=dict( 96 | type='OICRHead', 97 | in_channels=256, 98 | hidden_channels=256, 99 | roi_feat_size=7, 100 | num_classes=15)) 101 | ) 102 | work_dir = '/media/h/M/P2B/1dota/WSOD/oicr_res50/' 103 | -------------------------------------------------------------------------------- /PLUG-Det/tools/misc/get_image_metas.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | """Get test image metas on a specific dataset. 3 | 4 | Here is an example to run this script. 
5 | 6 | Example: 7 | python tools/misc/get_image_metas.py ${CONFIG} \ 8 | --out ${OUTPUT FILE NAME} 9 | """ 10 | import argparse 11 | import csv 12 | import os.path as osp 13 | from multiprocessing import Pool 14 | 15 | import mmcv 16 | from mmcv import Config 17 | 18 | 19 | def parse_args(): 20 | parser = argparse.ArgumentParser(description='Collect image metas') 21 | parser.add_argument('config', help='Config file path') 22 | parser.add_argument( 23 | '--out', 24 | default='validation-image-metas.pkl', 25 | help='The output image metas file name. The save dir is in the ' 26 | 'same directory as `dataset.ann_file` path') 27 | parser.add_argument( 28 | '--nproc', 29 | default=4, 30 | type=int, 31 | help='Processes used for get image metas') 32 | args = parser.parse_args() 33 | return args 34 | 35 | 36 | def get_metas_from_csv_style_ann_file(ann_file): 37 | data_infos = [] 38 | cp_filename = None 39 | with open(ann_file, 'r') as f: 40 | reader = csv.reader(f) 41 | for i, line in enumerate(reader): 42 | if i == 0: 43 | continue 44 | img_id = line[0] 45 | filename = f'{img_id}.jpg' 46 | if filename != cp_filename: 47 | data_infos.append(dict(filename=filename)) 48 | cp_filename = filename 49 | return data_infos 50 | 51 | 52 | def get_metas_from_txt_style_ann_file(ann_file): 53 | with open(ann_file) as f: 54 | lines = f.readlines() 55 | i = 0 56 | data_infos = [] 57 | while i < len(lines): 58 | filename = lines[i].rstrip() 59 | data_infos.append(dict(filename=filename)) 60 | skip_lines = int(lines[i + 2]) + 3 61 | i += skip_lines 62 | return data_infos 63 | 64 | 65 | def get_image_metas(data_info, img_prefix): 66 | file_client = mmcv.FileClient(backend='disk') 67 | filename = data_info.get('filename', None) 68 | if filename is not None: 69 | if img_prefix is not None: 70 | filename = osp.join(img_prefix, filename) 71 | img_bytes = file_client.get(filename) 72 | img = mmcv.imfrombytes(img_bytes, flag='color') 73 | meta = dict(filename=filename, ori_shape=img.shape) 74 | 
else: 75 | raise NotImplementedError('Missing `filename` in data_info') 76 | return meta 77 | 78 | 79 | def main(): 80 | args = parse_args() 81 | assert args.out.endswith('pkl'), 'The output file name must be pkl suffix' 82 | 83 | # load config files 84 | cfg = Config.fromfile(args.config) 85 | ann_file = cfg.data.test.ann_file 86 | img_prefix = cfg.data.test.img_prefix 87 | 88 | print(f'{"-" * 5} Start Processing {"-" * 5}') 89 | if ann_file.endswith('csv'): 90 | data_infos = get_metas_from_csv_style_ann_file(ann_file) 91 | elif ann_file.endswith('txt'): 92 | data_infos = get_metas_from_txt_style_ann_file(ann_file) 93 | else: 94 | shuffix = ann_file.split('.')[-1] 95 | raise NotImplementedError('File name must be csv or txt suffix but ' 96 | f'get {shuffix}') 97 | 98 | print(f'Successfully load annotation file from {ann_file}') 99 | print(f'Processing {len(data_infos)} images...') 100 | pool = Pool(args.nproc) 101 | # get image metas with multiple processes 102 | image_metas = pool.starmap( 103 | get_image_metas, 104 | zip(data_infos, [img_prefix for _ in range(len(data_infos))]), 105 | ) 106 | pool.close() 107 | 108 | # save image metas 109 | root_path = cfg.data.test.ann_file.rsplit('/', 1)[0] 110 | save_path = osp.join(root_path, args.out) 111 | mmcv.dump(image_metas, save_path) 112 | print(f'Image meta file save to: {save_path}') 113 | 114 | 115 | if __name__ == '__main__': 116 | main() 117 | -------------------------------------------------------------------------------- /PLUG-Det/configs/_base_/models/faster_rcnn_r50_fpn.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | model = dict( 3 | type='FasterRCNN', 4 | backbone=dict( 5 | type='ResNet', 6 | depth=50, 7 | num_stages=4, 8 | out_indices=(0, 1, 2, 3), 9 | frozen_stages=1, 10 | norm_cfg=dict(type='BN', requires_grad=True), 11 | norm_eval=True, 12 | style='pytorch', 13 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), 14 | 
neck=dict( 15 | type='FPN', 16 | in_channels=[256, 512, 1024, 2048], 17 | out_channels=256, 18 | num_outs=5), 19 | rpn_head=dict( 20 | type='RPNHead', 21 | in_channels=256, 22 | feat_channels=256, 23 | anchor_generator=dict( 24 | type='AnchorGenerator', 25 | scales=[8], 26 | ratios=[0.5, 1.0, 2.0], 27 | strides=[4, 8, 16, 32, 64]), 28 | bbox_coder=dict( 29 | type='DeltaXYWHBBoxCoder', 30 | target_means=[.0, .0, .0, .0], 31 | target_stds=[1.0, 1.0, 1.0, 1.0]), 32 | loss_cls=dict( 33 | type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), 34 | loss_bbox=dict(type='L1Loss', loss_weight=1.0)), 35 | roi_head=dict( 36 | type='StandardRoIHead', 37 | bbox_roi_extractor=dict( 38 | type='SingleRoIExtractor', 39 | roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), 40 | out_channels=256, 41 | featmap_strides=[4, 8, 16, 32]), 42 | bbox_head=dict( 43 | type='Shared2FCBBoxHead', 44 | in_channels=256, 45 | fc_out_channels=1024, 46 | roi_feat_size=7, 47 | num_classes=15, 48 | bbox_coder=dict( 49 | type='DeltaXYWHBBoxCoder', 50 | target_means=[0., 0., 0., 0.], 51 | target_stds=[0.1, 0.1, 0.2, 0.2]), 52 | reg_class_agnostic=False, 53 | loss_cls=dict( 54 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), 55 | loss_bbox=dict(type='L1Loss', loss_weight=1.0))), 56 | # model training and testing settings 57 | train_cfg=dict( 58 | rpn=dict( 59 | assigner=dict( 60 | type='MaxIoUAssigner', 61 | pos_iou_thr=0.7, 62 | neg_iou_thr=0.3, 63 | min_pos_iou=0.3, 64 | match_low_quality=True, 65 | ignore_iof_thr=-1), 66 | sampler=dict( 67 | type='RandomSampler', 68 | num=256, 69 | pos_fraction=0.5, 70 | neg_pos_ub=-1, 71 | add_gt_as_proposals=False), 72 | allowed_border=-1, 73 | pos_weight=-1, 74 | debug=False), 75 | rpn_proposal=dict( 76 | nms_pre=2000, 77 | max_per_img=1000, 78 | nms=dict(type='nms', iou_threshold=0.7), 79 | min_bbox_size=0), 80 | rcnn=dict( 81 | assigner=dict( 82 | type='MaxIoUAssigner', 83 | pos_iou_thr=0.5, 84 | neg_iou_thr=0.5, 85 | 
min_pos_iou=0.5, 86 | match_low_quality=False, 87 | ignore_iof_thr=-1), 88 | sampler=dict( 89 | type='RandomSampler', 90 | num=512, 91 | pos_fraction=0.25, 92 | neg_pos_ub=-1, 93 | add_gt_as_proposals=True), 94 | pos_weight=-1, 95 | debug=False)), 96 | test_cfg=dict( 97 | rpn=dict( 98 | nms_pre=1000, 99 | max_per_img=1000, 100 | nms=dict(type='nms', iou_threshold=0.7), 101 | min_bbox_size=0), 102 | rcnn=dict( 103 | score_thr=0.05, 104 | nms=dict(type='nms', iou_threshold=0.5), 105 | max_per_img=100) 106 | # soft-nms is also supported for rcnn testing 107 | # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) 108 | )) 109 | --------------------------------------------------------------------------------