├── .dev_scripts
└── gather_models.py
├── .gitattributes
├── .github
├── CONTRIBUTING.md
├── ISSUE_TEMPLATE
│ ├── 1-bug-report.yml
│ ├── 2-feature-request.yml
│ ├── 3-new-model.yml
│ ├── 4-documentation.yml
│ └── config.yml
├── pull_request_template.md
└── workflows
│ ├── lint.yml
│ ├── merge_stage_test.yml
│ ├── pr_stage_test.yml
│ ├── publish-to-pypi.yml
│ ├── scripts
│ └── get_mmcv_var.sh
│ └── test_mim.yml
├── .gitignore
├── .pre-commit-config-zh-cn.yaml
├── .pre-commit-config.yaml
├── .pylintrc
├── .readthedocs.yml
├── CITATION.cff
├── LICENSE
├── MANIFEST.in
├── README.md
├── README_mmrotate.md
├── README_zh-CN.md
├── configs
├── _base_
│ ├── datasets
│ │ ├── dior.py
│ │ ├── dota.py
│ │ ├── dota_coco.py
│ │ ├── dota_ms.py
│ │ ├── dota_qbox.py
│ │ ├── dotav15.py
│ │ ├── dotav2.py
│ │ ├── hrsc.py
│ │ ├── hrsid.py
│ │ ├── rsdd.py
│ │ ├── semi_visdronezsd.py
│ │ ├── srsdd.py
│ │ ├── ssdd.py
│ │ └── visdronezsd.py
│ ├── default_runtime.py
│ ├── models
│ │ └── oriented-rcnn-le90_r50_fpn.py
│ └── schedules
│ │ ├── schedule_1x.py
│ │ ├── schedule_3x.py
│ │ ├── schedule_40e.py
│ │ └── schedule_6x.py
├── cfa
│ ├── README.md
│ ├── cfa-qbox_r50_fpn_1x_dota.py
│ ├── cfa-qbox_r50_fpn_40e_dota.py
│ └── metafile.yml
├── convnext
│ ├── README.md
│ ├── metafile.yml
│ └── rotated-retinanet-rbox-le90_convnext-tiny_fpn_kld-stable_adamw-1x_dota.py
├── csl
│ ├── README.md
│ ├── metafile.yml
│ └── rotated-retinanet-rbox-le90_r50_fpn_csl-gaussian_amp-1x_dota.py
├── gliding_vertex
│ ├── README.md
│ ├── gliding-vertex-qbox_r50_fpn_1x_dota.py
│ ├── gliding-vertex-rbox_r50_fpn_1x_dota.py
│ └── metafile.yml
├── gwd
│ ├── README.md
│ ├── metafile.yml
│ ├── rotated-retinanet-hbox-oc_r50_fpn_gwd_1x_dota.py
│ ├── rotated-retinanet-rbox-le135_r50_fpn_gwd_1x_dota.py
│ └── rotated-retinanet-rbox-le90_r50_fpn_gwd_1x_dota.py
├── h2rbox
│ ├── README.md
│ ├── dior
│ │ └── h2rbox-le90_r50_fpn_adamw-1x_dior.py
│ ├── dotav15
│ │ ├── h2rbox-le90_r50_fpn_adamw-1x_dotav15.py
│ │ └── h2rbox-le90_r50_fpn_adamw-3x_dotav15.py
│ ├── dotav2
│ │ ├── h2rbox-le90_r50_fpn_adamw-1x_dotav2.py
│ │ └── h2rbox-le90_r50_fpn_adamw-3x_dotav2.py
│ ├── h2rbox-le90_r50_fpn_adamw-1x_dota-ms.py
│ ├── h2rbox-le90_r50_fpn_adamw-1x_dota.py
│ ├── h2rbox-le90_r50_fpn_adamw-3x_dota.py
│ └── metafile.yml
├── h2rbox_v2
│ ├── README.md
│ ├── h2rbox_v2-le90_r50_fpn-1x_dota.py
│ ├── h2rbox_v2-le90_r50_fpn-1x_dotav15.py
│ ├── h2rbox_v2-le90_r50_fpn-1x_dotav2.py
│ ├── h2rbox_v2-le90_r50_fpn-6x_hrsc.py
│ ├── h2rbox_v2-le90_r50_fpn_ms_rr-1x_dota.py
│ ├── h2rbox_v2-le90_r50_fpn_rr-6x_hrsc.py
│ └── metafile.yml
├── kfiou
│ ├── README.md
│ ├── metafile.yml
│ ├── r3det-oc_r50_fpn_kfiou-ln_1x_dota.py
│ ├── roi-trans-le90_r50_fpn_kfiou-ln_1x_dota.py
│ ├── rotated-retinanet-hbox-le135_r50_fpn_kfiou_1x_dota.py
│ ├── rotated-retinanet-hbox-le90_r50_fpn_kfiou_1x_dota.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_kfiou_1x_dota.py
│ └── s2anet-le135_r50_fpn_kfiou-ln_1x_dota.py
├── kld
│ ├── README.md
│ ├── metafile.yml
│ ├── r3det-oc_r50_fpn_kld-stable_1x_dota.py
│ ├── r3det-oc_r50_fpn_kld_1x_dota.py
│ ├── r3det-tiny-oc_r50_fpn_kld_1x_dota.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_kld-stable_1x_dota.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_kld-stable_rr-6x_hrsc.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_kld_1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_kld-stable_1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_kld-stable_adamw-1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_kld-stable_rr-6x_hrsc.py
│ └── rotated-retinanet-rbox-le90_r50_fpn_kld_1x_dota.py
├── oriented_rcnn
│ ├── README.md
│ ├── metafile.yml
│ ├── oriented-rcnn-le90_r50_fpn_1x_dota.py
│ ├── oriented-rcnn-le90_r50_fpn_6x_hrsid.py
│ ├── oriented-rcnn-le90_r50_fpn_6x_rsdd.py
│ ├── oriented-rcnn-le90_r50_fpn_6x_srsdd.py
│ ├── oriented-rcnn-le90_r50_fpn_6x_ssdd.py
│ ├── oriented-rcnn-le90_r50_fpn_amp-1x_dota.py
│ └── oriented-rcnn-le90_swin-tiny_fpn_1x_dota.py
├── oriented_reppoints
│ ├── README.md
│ ├── metafile.yml
│ ├── oriented-reppoints-qbox_r50_fpn_1x_dota.py
│ └── oriented-reppoints-qbox_r50_fpn_mstrain-40e_dota.py
├── psc
│ ├── README.md
│ ├── metafile.yml
│ ├── rotated-fcos-hbox-le90_r50_fpn_psc-dual_1x_dota.py
│ ├── rotated-fcos-hbox-le90_r50_fpn_psc_rr-6x_hrsc.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_psc-dual_amp-1x_dota.py
│ └── rotated-retinanet-rbox-le90_r50_fpn_psc_rr-6x_hrsc.py
├── r3det
│ ├── README.md
│ ├── metafile.yml
│ ├── r3det-oc_r50_fpn_1x_dota.py
│ ├── r3det-refine-oc_r50_fpn_1x_dota.py
│ └── r3det-tiny-oc_r50_fpn_1x_dota.py
├── redet
│ ├── README.md
│ ├── metafile.yml
│ ├── redet-le90_re50_refpn_1x_dota.py
│ ├── redet-le90_re50_refpn_3x_hrsc.py
│ ├── redet-le90_re50_refpn_amp-1x_dota.py
│ └── redet-le90_re50_refpn_rr-1x_dota-ms.py
├── roi_trans
│ ├── README.md
│ ├── metafile.yml
│ ├── roi-trans-le135_r50_fpn_1x_dota.py
│ ├── roi-trans-le90_r50_fpn_1x_dota-ms.py
│ ├── roi-trans-le90_r50_fpn_1x_dota.py
│ ├── roi-trans-le90_r50_fpn_amp-1x_dota.py
│ ├── roi-trans-le90_r50_fpn_rr-1x_dota-ms.py
│ ├── roi-trans-le90_swin-tiny_fpn_1x_dota.py
│ └── roi-trans-oc_r50_fpn_1x_dota.py
├── rotated_atss
│ ├── README.md
│ ├── metafile.yml
│ └── rotated-atss-le90_r50_fpn_1x_dota.py
├── rotated_faster_rcnn
│ ├── README.md
│ ├── metafile.yml
│ └── rotated-faster-rcnn-le90_r50_fpn_1x_dota.py
├── rotated_fcos
│ ├── README.md
│ ├── metafile.yml
│ ├── rotated-fcos-hbox-le90_r50_fpn_1x_dota.py
│ ├── rotated-fcos-hbox-le90_r50_fpn_csl-gaussian_1x_dota.py
│ ├── rotated-fcos-le90_r50_fpn_1x_dota.py
│ ├── rotated-fcos-le90_r50_fpn_kld_1x_dota.py
│ └── rotated-fcos-le90_r50_fpn_rr-6x_hrsc.py
├── rotated_reppoints
│ ├── README.md
│ ├── metafile.yml
│ └── rotated-reppoints-qbox_r50_fpn_1x_dota.py
├── rotated_retinanet
│ ├── README.md
│ ├── metafile.yml
│ ├── rotated-retinanet-hbox-le135_r50_fpn_1x_dota.py
│ ├── rotated-retinanet-hbox-le90_r50_fpn_1x_dota.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py
│ ├── rotated-retinanet-hbox-oc_r50_fpn_rr-6x_hrsc.py
│ ├── rotated-retinanet-rbox-le135_r50_fpn_1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_1x_dior.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_amp-1x_dota.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_rr-1x_dota-ms.py
│ ├── rotated-retinanet-rbox-le90_r50_fpn_rr-6x_hrsc.py
│ ├── rotated-retinanet-rbox-oc_r50_fpn_1x_dior.py
│ └── rotated-retinanet-rbox-oc_r50_fpn_1x_dota.py
├── rotated_rtmdet
│ ├── README.md
│ ├── _base_
│ │ ├── default_runtime.py
│ │ ├── dota_rr.py
│ │ ├── dota_rr_ms.py
│ │ ├── hrsc_rr.py
│ │ └── schedule_3x.py
│ ├── metafile.yml
│ ├── rotated_rtmdet_l-100e-aug-dota.py
│ ├── rotated_rtmdet_l-300e-aug-hrsc.py
│ ├── rotated_rtmdet_l-3x-dota.py
│ ├── rotated_rtmdet_l-3x-dota_ms.py
│ ├── rotated_rtmdet_l-9x-hrsc.py
│ ├── rotated_rtmdet_l-coco_pretrain-3x-dota_ms.py
│ ├── rotated_rtmdet_m-3x-dota.py
│ ├── rotated_rtmdet_m-3x-dota_ms.py
│ ├── rotated_rtmdet_s-3x-dota.py
│ ├── rotated_rtmdet_s-3x-dota_ms.py
│ ├── rotated_rtmdet_tiny-300e-aug-hrsc.py
│ ├── rotated_rtmdet_tiny-3x-dota.py
│ ├── rotated_rtmdet_tiny-3x-dota_ms.py
│ └── rotated_rtmdet_tiny-9x-hrsc.py
├── s2anet
│ ├── README.md
│ ├── cascade-s2anet-le135_r50_fpn_1x_dota.py
│ ├── metafile.yml
│ ├── s2anet-le135_r50_fpn_1x_dota.py
│ ├── s2anet-le135_r50_fpn_3x_hrsc.py
│ ├── s2anet-le135_r50_fpn_amp-1x_dota.py
│ └── s2anet-le90_r50_fpn_1x_dota.py
└── sasm_reppoints
│ ├── README.md
│ ├── metafile.yml
│ └── sasm-reppoints-qbox_r50_fpn_1x_dota.py
├── demo
├── MMRotate_Tutorial.ipynb
├── demo.jpg
├── dota_demo.jpg
├── huge_image_demo.py
└── image_demo.py
├── docker
├── Dockerfile
└── serve
│ ├── Dockerfile
│ ├── config.properties
│ └── entrypoint.sh
├── docs
├── en
│ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── image
│ │ │ └── mmrotate-logo.png
│ ├── advanced_guides
│ │ ├── conventions.md
│ │ ├── customize_dataset.md
│ │ ├── customize_losses.md
│ │ ├── customize_models.md
│ │ ├── customize_runtime.md
│ │ ├── customize_transforms.md
│ │ ├── data_flow.md
│ │ ├── datasets.md
│ │ ├── engine.md
│ │ ├── evaluation.md
│ │ ├── how_to.md
│ │ ├── index.rst
│ │ ├── models.md
│ │ └── transforms.md
│ ├── api.rst
│ ├── conf.py
│ ├── get_started.md
│ ├── index.rst
│ ├── make.bat
│ ├── migration.md
│ ├── model_zoo.md
│ ├── notes
│ │ ├── changelog.md
│ │ ├── changelog_v0.x.md
│ │ ├── contribution_guide.md
│ │ ├── faq.md
│ │ └── projects.md
│ ├── overview.md
│ ├── stat.py
│ ├── switch_language.md
│ └── user_guides
│ │ ├── config.md
│ │ ├── dataset_prepare.md
│ │ ├── deploy.md
│ │ ├── index.rst
│ │ ├── inference.md
│ │ ├── label_studio.md
│ │ ├── test_results_submission.md
│ │ ├── train_test.md
│ │ ├── useful_tools.md
│ │ └── visualization.md
└── zh_cn
│ ├── _static
│ ├── css
│ │ └── readthedocs.css
│ └── image
│ │ └── mmrotate-logo.png
│ ├── advanced_guides
│ ├── conventions.md
│ ├── customize_dataset.md
│ ├── customize_losses.md
│ ├── customize_models.md
│ ├── customize_runtime.md
│ ├── customize_transforms.md
│ ├── data_flow.md
│ ├── datasets.md
│ ├── engine.md
│ ├── evaluation.md
│ ├── how_to.md
│ ├── index.rst
│ ├── models.md
│ └── transforms.md
│ ├── api.rst
│ ├── conf.py
│ ├── get_started.md
│ ├── index.rst
│ ├── make.bat
│ ├── migration.md
│ ├── model_zoo.md
│ ├── notes
│ ├── contribution_guide.md
│ ├── faq.md
│ └── projects.md
│ ├── overview.md
│ ├── stat.py
│ ├── switch_language.md
│ └── user_guides
│ ├── config.md
│ ├── dataset_prepare.md
│ ├── deploy.md
│ ├── index.rst
│ ├── inference.md
│ ├── label_studio.md
│ ├── test_results_submission.md
│ ├── train_test.md
│ ├── useful_tools.md
│ └── visualization.md
├── mmrotate
├── __init__.py
├── apis
│ ├── __init__.py
│ └── inference.py
├── datasets
│ ├── __init__.py
│ ├── dior.py
│ ├── dota.py
│ ├── hrsc.py
│ ├── instance_dataset.py
│ ├── nwpu45.py
│ └── transforms
│ │ ├── __init__.py
│ │ ├── loading.py
│ │ └── transforms.py
├── engine
│ ├── __init__.py
│ └── hooks
│ │ ├── __init__.py
│ │ └── mean_teacher_hook.py
├── evaluation
│ ├── __init__.py
│ ├── functional
│ │ ├── __init__.py
│ │ └── mean_ap.py
│ └── metrics
│ │ ├── __init__.py
│ │ ├── dota_metric.py
│ │ └── rotated_coco_metric.py
├── models
│ ├── __init__.py
│ ├── backbones
│ │ ├── __init__.py
│ │ └── re_resnet.py
│ ├── dense_heads
│ │ ├── __init__.py
│ │ ├── angle_branch_retina_head.py
│ │ ├── cfa_head.py
│ │ ├── h2rbox_head.py
│ │ ├── h2rbox_v2_head.py
│ │ ├── oriented_reppoints_head.py
│ │ ├── oriented_rpn_head.py
│ │ ├── r3_head.py
│ │ ├── rhino_align_head.py
│ │ ├── rhino_head.py
│ │ ├── rhino_ph_head.py
│ │ ├── rhino_phc_head.py
│ │ ├── rotated_atss_head.py
│ │ ├── rotated_conditional_detr_head.py
│ │ ├── rotated_dab_detr_head.py
│ │ ├── rotated_deformable_detr_head.py
│ │ ├── rotated_detr_head.py
│ │ ├── rotated_fcos_head.py
│ │ ├── rotated_reppoints_head.py
│ │ ├── rotated_retina_head.py
│ │ ├── rotated_rtmdet_head.py
│ │ ├── s2a_head.py
│ │ └── sam_reppoints_head.py
│ ├── detectors
│ │ ├── __init__.py
│ │ ├── h2rbox.py
│ │ ├── h2rbox_v2.py
│ │ ├── refine_single_stage.py
│ │ ├── rhino.py
│ │ ├── rotated_dab_detr.py
│ │ ├── rotated_deformable_detr.py
│ │ ├── rotated_soft_teacher.py
│ │ └── semi_base.py
│ ├── layers
│ │ ├── __init__.py
│ │ ├── align.py
│ │ └── transformer
│ │ │ ├── __init__.py
│ │ │ ├── rhino_layers.py
│ │ │ ├── rhino_layers_v2.py
│ │ │ ├── rotated_attention.py
│ │ │ ├── rotated_dab_detr_layers.py
│ │ │ ├── rotated_deformable_detr_layers.py
│ │ │ └── utils.py
│ ├── losses
│ │ ├── __init__.py
│ │ ├── convex_giou_loss.py
│ │ ├── gaussian_dist_loss.py
│ │ ├── gaussian_dist_loss_v1.py
│ │ ├── h2rbox_consistency_loss.py
│ │ ├── h2rbox_v2_consistency_loss.py
│ │ ├── kf_iou_loss.py
│ │ ├── rotated_iou_loss.py
│ │ ├── smooth_focal_loss.py
│ │ └── spatial_border_loss.py
│ ├── necks
│ │ ├── __init__.py
│ │ └── re_fpn.py
│ ├── roi_heads
│ │ ├── __init__.py
│ │ ├── bbox_heads
│ │ │ ├── __init__.py
│ │ │ ├── convfc_rbbox_head.py
│ │ │ └── gv_bbox_head.py
│ │ ├── gv_ratio_roi_head.py
│ │ └── roi_extractors
│ │ │ ├── __init__.py
│ │ │ └── rotate_single_level_roi_extractor.py
│ ├── task_modules
│ │ ├── __init__.py
│ │ ├── assigners
│ │ │ ├── __init__.py
│ │ │ ├── convex_assigner.py
│ │ │ ├── dn_group_hungarian_assigner.py
│ │ │ ├── match_cost.py
│ │ │ ├── max_convex_iou_assigner.py
│ │ │ ├── rotate_iou2d_calculator.py
│ │ │ ├── rotated_atss_assigner.py
│ │ │ └── sas_assigner.py
│ │ ├── coders
│ │ │ ├── __init__.py
│ │ │ ├── angle_coder.py
│ │ │ ├── delta_midpointoffset_rbbox_coder.py
│ │ │ ├── delta_xywh_hbbox_coder.py
│ │ │ ├── delta_xywh_qbbox_coder.py
│ │ │ ├── delta_xywht_hbbox_coder.py
│ │ │ ├── delta_xywht_rbbox_coder.py
│ │ │ ├── distance_angle_point_coder.py
│ │ │ └── gliding_vertex_coder.py
│ │ └── prior_generators
│ │ │ ├── __init__.py
│ │ │ └── anchor_generator.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── enn.py
│ │ ├── misc.py
│ │ ├── orconv.py
│ │ └── ripool.py
├── registry.py
├── structures
│ ├── __init__.py
│ └── bbox
│ │ ├── __init__.py
│ │ ├── bbox_overlaps.py
│ │ ├── box_converters.py
│ │ ├── quadri_boxes.py
│ │ ├── rotated_boxes.py
│ │ └── transforms.py
├── testing
│ ├── __init__.py
│ └── _utils.py
├── utils
│ ├── __init__.py
│ ├── collect_env.py
│ ├── misc.py
│ ├── patch
│ │ ├── __init__.py
│ │ ├── merge_results.py
│ │ └── split.py
│ └── setup_env.py
├── version.py
└── visualization
│ ├── __init__.py
│ ├── local_visualizer.py
│ └── palette.py
├── model-index.yml
├── projects
├── CastDetv2
│ ├── README.md
│ ├── castdet
│ │ ├── __init__.py
│ │ ├── castdet.py
│ │ ├── modified_resnet.py
│ │ ├── ovd_bbox_head.py
│ │ ├── pseudo_label_queue.py
│ │ └── standard_roi_head2.py
│ ├── configs
│ │ ├── oriented-rcnn_r50-fpn_20k_visdronezsd_base-set.py
│ │ └── visdrone_step2_castdet_12b_10k_oriented.py
│ ├── resources
│ │ ├── castdet.png
│ │ ├── remoteCLIP_embeddings_bgs_normalized.npy
│ │ ├── remoteCLIP_embeddings_normalized.npy
│ │ ├── results_hbb.png
│ │ ├── results_obb.png
│ │ ├── toolbox.png
│ │ ├── vis_result.png
│ │ └── visdronezsd_split
│ │ │ ├── test.txt
│ │ │ ├── visdrone_initialize.txt
│ │ │ ├── visdrone_labeled_3000.txt
│ │ │ ├── visdrone_test.txt
│ │ │ └── visdrone_unlabeled_8726.txt
│ ├── run.sh
│ └── tools
│ │ ├── generate_text_embeddings.py
│ │ └── merge_weights.py
├── GLIP
│ ├── README.md
│ ├── configs
│ │ ├── glip_atss_r50_a_fpn_dyhead_visdronezsd_base.py
│ │ ├── glip_atss_r50_a_fpn_dyhead_visdronezsd_base_nwpu.py
│ │ └── glip_atss_r50_a_fpn_dyhead_visdronezsd_base_nwpu45_pseudo_labeling.py
│ ├── glip
│ │ ├── __init__.py
│ │ └── atss_vlfusion_head.py
│ └── run.sh
├── GroundingDINO
│ ├── README.md
│ ├── configs
│ │ ├── grounding_dino_swin-t_visdrone_base-set_adamw.py
│ │ ├── grounding_dino_swin-t_visdrone_base-set_adamw_nwpu45.py
│ │ └── grounding_dino_swin-t_visdrone_base-set_adamw_nwpu45_pseudo_labeling.py
│ ├── groundingdino
│ │ ├── __init__.py
│ │ ├── grounding_dino.py
│ │ ├── grounding_dino_head.py
│ │ └── grounding_dino_layers.py
│ ├── run.sh
│ └── tools
│ │ └── merge_ovdg_preds.py
├── LSKNet
│ ├── README.md
│ ├── configs
│ │ ├── lsk_s_ema_fpn_1x_dota_le90.py
│ │ ├── lsk_s_fpn_1x_dota_le90.py
│ │ ├── lsk_s_fpn_3x_hrsc_le90.py
│ │ └── lsk_t_fpn_1x_dota_le90.py
│ └── lsknet
│ │ ├── __init__.py
│ │ └── lsknet.py
├── LabelStudio
│ └── backend_template
│ │ ├── _mmrotate.py
│ │ ├── _wsgi.py
│ │ └── readme.md
├── RR360
│ ├── configs360
│ │ ├── .gitkeep
│ │ ├── _base_
│ │ │ ├── datasets
│ │ │ │ └── dota.py
│ │ │ ├── default_runtime.py
│ │ │ └── schedules
│ │ │ │ ├── schedule_1x.py
│ │ │ │ ├── schedule_3x.py
│ │ │ │ └── schedule_6x.py
│ │ ├── readme.md
│ │ ├── rotated_retinanet
│ │ │ └── rotated-retinanet-rbox-r360_r50_fpn_6x_ic19.py
│ │ ├── rotated_rtmdet_x3
│ │ │ ├── _base_
│ │ │ │ ├── default_runtime.py
│ │ │ │ ├── dota_rr.py
│ │ │ │ └── schedule_3x.py
│ │ │ └── rotated_rtmdet_l_l1-3x-ic19_pt.py
│ │ └── rotated_rtmdet_x3_r
│ │ │ ├── _base_
│ │ │ ├── default_runtime.py
│ │ │ ├── dota_rr.py
│ │ │ └── schedule_3x.py
│ │ │ ├── rotated_rtmdet_l-3x-ic19_pt.py
│ │ │ ├── rotated_rtmdet_l_l1-3x-ic19_pt.py
│ │ │ ├── rotated_rtmdet_m_l1-3x-ic19_pt.py
│ │ │ ├── rotated_rtmdet_s_l1-3x-ic19_pt.py
│ │ │ └── rotated_rtmdet_tiny_l1-3x-ic19_pt.py
│ ├── datasets
│ │ ├── __init__.py
│ │ └── transforms
│ │ │ ├── __init__.py
│ │ │ └── transforms.py
│ ├── evaluation
│ │ ├── __init__.py
│ │ ├── functional
│ │ │ ├── __init__.py
│ │ │ └── mean_ap.py
│ │ └── metrics
│ │ │ ├── __init__.py
│ │ │ └── dota_r360_metric.py
│ ├── readme.md
│ ├── readme_Zh.md
│ ├── structures
│ │ ├── __init__.py
│ │ └── bbox
│ │ │ ├── __init__.py
│ │ │ └── rotated_boxes.py
│ ├── tools
│ │ ├── browse_dataset.py
│ │ ├── test.py
│ │ └── train.py
│ └── visualization
│ │ ├── __init__.py
│ │ └── local_visualizer.py
├── ViLD
│ ├── README.md
│ ├── configs
│ │ ├── vild_oriented-rcnn_r50_fpn_visdronezsd_step1_prepare.py
│ │ ├── vild_oriented-rcnn_r50_fpn_visdronezsd_step2_finetune.py
│ │ └── vild_visdronezsd.py
│ ├── resources
│ │ └── vild_framework.png
│ ├── run.sh
│ └── vild
│ │ ├── __init__.py
│ │ ├── modified_resnet.py
│ │ ├── ovd_bbox_head.py
│ │ └── rotated_vild.py
└── example_project
│ ├── README.md
│ ├── configs
│ └── r3det-oc_dummy-resnet_fpn_1x_dota.py
│ └── dummy
│ ├── __init__.py
│ └── dummy_resnet.py
├── requirements.txt
├── requirements
├── build.txt
├── docs.txt
├── mminstall.txt
├── multimodal.txt
├── optional.txt
├── readthedocs.txt
├── runtime.txt
└── tests.txt
├── resources
├── mmrotate-logo.png
├── qq_group_qrcode.jpg
└── zhihu_qrcode.jpg
├── setup.cfg
├── setup.py
├── tests
├── data
│ ├── dior
│ │ ├── Annotations
│ │ │ └── Oriented Bounding Boxes
│ │ │ │ └── 00001.xml
│ │ ├── JPEGImage
│ │ │ └── 00001.jpg
│ │ └── demo.txt
│ ├── dota
│ │ ├── images
│ │ │ └── P2805__1024__0___0.png
│ │ └── labelTxt
│ │ │ └── P2805__1024__0___0.txt
│ └── hrsc
│ │ ├── FullDataSet
│ │ └── Annotations
│ │ │ └── 100000006.xml
│ │ └── demo.txt
├── test_apis
│ └── test_inference.py
├── test_datasets
│ ├── test_dior.py
│ ├── test_dota.py
│ ├── test_hrsc.py
│ └── test_transforms
│ │ └── test_transforms.py
├── test_evaluation
│ └── test_metrics
│ │ ├── test_dota_metric.py
│ │ └── test_rotated_coco_metric.py
├── test_models
│ ├── test_backbones
│ │ └── test_re_resnet.py
│ ├── test_dense_heads
│ │ ├── test_angle_branch_retina_head.py
│ │ ├── test_cfa_head.py
│ │ ├── test_h2rbox_head.py
│ │ ├── test_oriented_reppoints.py
│ │ ├── test_r3_head.py
│ │ ├── test_rotated_atss_head.py
│ │ ├── test_rotated_fcos_head.py
│ │ ├── test_rotated_reppoints.py
│ │ ├── test_rotated_retina_head.py
│ │ ├── test_rotated_rtmdet_head.py
│ │ ├── test_s2a_head.py
│ │ └── test_sam_reppoints_head.py
│ ├── test_detectors
│ │ ├── test_h2rbox.py
│ │ ├── test_refine_single_stage.py
│ │ ├── test_single_stage.py
│ │ └── test_two_stage.py
│ ├── test_losses
│ │ └── test_loss.py
│ ├── test_necks
│ │ └── test_re_fpn.py
│ ├── test_roi_heads
│ │ ├── test_bbox_head
│ │ │ ├── test_convfc_rbbox_head.py
│ │ │ └── test_gv_bbox_head.py
│ │ └── test_gv_ratio_roi_head.py
│ └── test_task_modules
│ │ ├── test_assigners
│ │ ├── test_convex_assigner.py
│ │ ├── test_max_convex_iou_assigner.py
│ │ ├── test_rotated_atss_assigner.py
│ │ └── test_sas_assigner.py
│ │ ├── test_coder
│ │ ├── test_angle_coder.py
│ │ ├── test_delta_midpointoffset_rbbox_coder.py
│ │ ├── test_delta_xywh_hbbox_coder.py
│ │ ├── test_delta_xywh_qbbox_coder.py
│ │ ├── test_delta_xywht_hbbox_coder.py
│ │ ├── test_delta_xywht_rbbox_coder.py
│ │ └── test_gliding_vertex_coder.py
│ │ ├── test_prior_generators
│ │ └── test_anchor_generator.py
│ │ └── test_rotated_iou2d_calculator.py
├── test_structures
│ └── test_bbox
│ │ ├── test_box_converters.py
│ │ ├── test_quadri_boxes.py
│ │ └── test_rotated_boxes.py
└── test_visualization
│ ├── test_local_visualizer.py
│ └── test_palette.py
└── tools
├── analysis_tools
├── analyze_logs.py
├── benchmark.py
├── browse_dataset.py
├── confusion_matrix.py
└── get_flops.py
├── data
├── README.md
├── dior
│ └── README.md
├── dota
│ ├── README.md
│ ├── dota2coco.py
│ └── split
│ │ ├── img_split.py
│ │ └── split_configs
│ │ ├── ms_test.json
│ │ ├── ms_train.json
│ │ ├── ms_trainval.json
│ │ ├── ms_val.json
│ │ ├── ss_test.json
│ │ ├── ss_train.json
│ │ ├── ss_trainval.json
│ │ └── ss_val.json
├── hrsc
│ └── README.md
├── hrsid
│ └── README.md
├── rsdd
│ └── README.md
├── srsdd
│ └── README.md
└── ssdd
│ └── README.md
├── deployment
├── mmrotate2torchserve.py
└── mmrotate_handler.py
├── dist_test.sh
├── dist_train.sh
├── misc
└── print_config.py
├── model_converters
└── publish_model.py
├── slurm_test.sh
├── slurm_train.sh
├── test.py
└── train.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | We appreciate all contributions to improve MMRotate. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2-feature-request.yml:
--------------------------------------------------------------------------------
1 | name: 🚀 Feature request
2 | description: Suggest an idea for this project
3 | labels: "kind/enhancement,status/unconfirmed"
4 | title: "[Feature] "
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmrotate/pulls)!
11 | If you need our help, please fill in as much of the following form as you're able to.
12 |
13 | **The less clear the description, the longer it will take to solve it.**
14 |
15 | - type: textarea
16 | attributes:
17 | label: What's the feature?
18 | description: |
19 | Tell us more about the feature and how this feature can help.
20 | placeholder: |
21 | E.g., It is inconvenient when \[....\].
22 | This feature can \[....\].
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | attributes:
28 | label: Any other context?
29 | description: |
30 | Have you considered any alternative solutions or features? If so, what are they?
31 | Also, feel free to add any other context or screenshots about the feature request here.
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/3-new-model.yml:
--------------------------------------------------------------------------------
1 | name: "\U0001F31F New model/dataset/scheduler addition"
2 | description: Submit a proposal/request to implement a new model / dataset / scheduler
3 | labels: "kind/feature,status/unconfirmed"
4 | title: "[New Models] "
5 |
6 |
7 | body:
8 | - type: textarea
9 | id: description-request
10 | validations:
11 | required: true
12 | attributes:
13 | label: Model/Dataset/Scheduler description
14 | description: |
15 | Put any and all important information relative to the model/dataset/scheduler
16 |
17 | - type: checkboxes
18 | attributes:
19 | label: Open source status
20 | description: |
21 | Please provide the open-source status, which would be very helpful
22 | options:
23 | - label: "The model implementation is available"
24 | - label: "The model weights are available."
25 |
26 | - type: textarea
27 | id: additional-info
28 | attributes:
29 | label: Provide useful links for the implementation
30 | description: |
31 | Please provide information regarding the implementation, the weights, and the authors.
32 | Please mention the authors by @gh-username if you're aware of their usernames.
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/4-documentation.yml:
--------------------------------------------------------------------------------
1 | name: 📚 Documentation
2 | description: Report an issue related to the documentation.
3 | labels: "kind/doc,status/unconfirmed"
4 | title: "[Docs] "
5 |
6 | body:
7 | - type: dropdown
8 | id: branch
9 | attributes:
10 | label: Branch
11 | description: This issue is related to the
12 | options:
13 | - master branch https://mmrotate.readthedocs.io/en/latest/
14 | - 1.x branch https://mmrotate.readthedocs.io/en/1.x/
15 | validations:
16 | required: true
17 |
18 | - type: textarea
19 | attributes:
20 | label: 📚 The doc issue
21 | description: >
 22 |         A clear and concise description of the issue.
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | attributes:
28 | label: Suggest a potential alternative/fix
29 | description: >
30 | Tell us how we could improve the documentation in this regard.
31 | - type: markdown
32 | attributes:
33 | value: >
34 | Thanks for contributing 🎉!
35 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
3 | contact_links:
4 | - name: Common Issues
5 | url: https://mmrotate.readthedocs.io/en/latest/faq.html
6 | about: Check if your issue already has solutions
7 | - name: MMRotate Documentation
8 | url: https://mmrotate.readthedocs.io/en/latest/
9 | about: Check if your question is answered in docs
10 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
 1 | Thanks for your contribution and we appreciate it a lot. The following instructions will help make your pull request healthier and easier to get feedback on. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.
2 |
3 | ## Motivation
4 |
5 | Please describe the motivation of this PR and the goal you want to achieve through this PR.
6 |
7 | ## Modification
8 |
9 | Please briefly describe what modification is made in this PR.
10 |
11 | ## BC-breaking (Optional)
12 |
13 | Does the modification introduce changes that break the back-compatibility of the downstream repos?
14 | If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
15 |
16 | ## Use cases (Optional)
17 |
18 | If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
19 |
20 | ## Checklist
21 |
22 | 1. Pre-commit or other linting tools are used to fix the potential lint issues.
 23 | 2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness.
24 | 3. The documentation has been modified accordingly, like docstring or example tutorials.
25 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 |
3 | on: [push, pull_request]
4 |
5 | concurrency:
6 | group: ${{ github.workflow }}-${{ github.ref }}
7 | cancel-in-progress: true
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v2
14 | - name: Set up Python 3.7
15 | uses: actions/setup-python@v2
16 | with:
17 | python-version: 3.7
18 | - name: Install pre-commit hook
19 | run: |
20 | pip install pre-commit
21 | pre-commit install
22 | - name: Linting
23 | run: pre-commit run --all-files
24 | - name: Check docstring coverage
25 | run: |
26 | pip install interrogate
27 | interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 mmrotate
28 |
--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 |
3 | on: push
4 |
5 | concurrency:
6 | group: ${{ github.workflow }}-${{ github.ref }}
7 | cancel-in-progress: true
8 |
9 | jobs:
10 | build-n-publish:
11 | runs-on: ubuntu-latest
12 | if: startsWith(github.event.ref, 'refs/tags')
13 | steps:
14 | - uses: actions/checkout@v2
15 | - name: Set up Python 3.7
16 | uses: actions/setup-python@v1
17 | with:
18 | python-version: 3.7
19 | - name: Build MMRotate
20 | run: |
21 | pip install wheel
22 | python setup.py sdist bdist_wheel
23 | - name: Publish distribution to PyPI
24 | run: |
25 | pip install twine
26 | twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
27 |
--------------------------------------------------------------------------------
/.github/workflows/scripts/get_mmcv_var.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | TORCH=$1
4 | CUDA=$2
5 |
6 | # 10.2 -> cu102
7 | MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"
8 |
9 | # MMCV only provides pre-compiled packages for torch 1.x.0
10 | # which works for any subversions of torch 1.x.
11 | # We force the torch version to be 1.x.0 to ease package searching
12 | # and avoid unnecessary rebuild during MMCV's installation.
13 | TORCH_VER_ARR=(${TORCH//./ })
14 | TORCH_VER_ARR[2]=0
15 | printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
16 | MMCV_TORCH=${MMCV_TORCH%?} # Remove the last dot
17 |
18 | echo "MMCV_CUDA=${MMCV_CUDA}" >> $GITHUB_ENV
19 | echo "MMCV_TORCH=${MMCV_TORCH}" >> $GITHUB_ENV
20 |
--------------------------------------------------------------------------------
/.github/workflows/test_mim.yml:
--------------------------------------------------------------------------------
1 | name: test-mim
2 |
3 | on:
4 | push:
5 | paths:
6 | - 'model-index.yml'
7 | - 'configs/**'
8 |
9 | pull_request:
10 | paths:
11 | - 'model-index.yml'
12 | - 'configs/**'
13 |
14 | concurrency:
15 | group: ${{ github.workflow }}-${{ github.ref }}
16 | cancel-in-progress: true
17 |
18 | jobs:
19 | build_cpu:
20 | runs-on: ubuntu-18.04
21 | strategy:
22 | matrix:
23 | python-version: [3.7]
24 | torch: [1.8.0]
25 | include:
26 | - torch: 1.8.0
27 | torch_version: torch1.8
28 | torchvision: 0.9.0
29 | steps:
30 | - uses: actions/checkout@v2
31 | - name: Set up Python ${{ matrix.python-version }}
32 | uses: actions/setup-python@v2
33 | with:
34 | python-version: ${{ matrix.python-version }}
35 | - name: Upgrade pip
36 | run: pip install pip --upgrade
37 | - name: Install PyTorch
38 | run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
39 | - name: Install openmim
40 | run: pip install openmim
41 | - name: Build and install
42 | run: rm -rf .eggs && mim install -e .
43 | - name: test commands of mim
44 | run: mim search mmrotate
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .nox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 | .idea/*
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 | db.sqlite3
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/en/_build/
70 | docs/zh_cn/_build/
71 | src
72 |
73 | # PyBuilder
74 | target/
75 |
76 | # Jupyter Notebook
77 | .ipynb_checkpoints
78 |
79 | # IPython
80 | profile_default/
81 | ipython_config.py
82 |
83 | # pyenv
84 | .python-version
85 |
86 | # celery beat schedule file
87 | celerybeat-schedule
88 |
89 | # SageMath parsed files
90 | *.sage.py
91 |
92 | # Environments
93 | .env
94 | .venv
95 | env/
96 | venv/
97 | ENV/
98 | env.bak/
99 | venv.bak/
100 |
101 | # Spyder project settings
102 | .spyderproject
103 | .spyproject
104 |
105 | # Rope project settings
106 | .ropeproject
107 |
108 | # mkdocs documentation
109 | /site
110 |
111 | # mypy
112 | .mypy_cache/
113 | .dmypy.json
114 | dmypy.json
115 |
116 | # Pyre type checker
117 | .pyre/
118 | .DS_Store
119 | .idea
120 | *work_dirs*
121 | data/
122 | data
123 | tmp
124 | debug_demo*
125 |
126 | # custom
127 | mmrotate/.mim
128 | .bak
129 | .vscode
130 | outputs/
131 | checkpoints*
--------------------------------------------------------------------------------
/.pre-commit-config-zh-cn.yaml:
--------------------------------------------------------------------------------
1 | exclude: ^tests/data/
2 | repos:
3 | - repo: https://gitee.com/openmmlab/mirrors-flake8
4 | rev: 5.0.4
5 | hooks:
6 | - id: flake8
7 | - repo: https://gitee.com/openmmlab/mirrors-isort
8 | rev: 5.10.1
9 | hooks:
10 | - id: isort
11 | - repo: https://gitee.com/openmmlab/mirrors-yapf
12 | rev: v0.32.0
13 | hooks:
14 | - id: yapf
15 | - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
16 | rev: v4.3.0
17 | hooks:
18 | - id: trailing-whitespace
19 | - id: check-yaml
20 | - id: end-of-file-fixer
21 | - id: requirements-txt-fixer
22 | - id: double-quote-string-fixer
23 | - id: check-merge-conflict
24 | - id: fix-encoding-pragma
25 | args: ["--remove"]
26 | - id: mixed-line-ending
27 | args: ["--fix=lf"]
28 | - repo: https://gitee.com/openmmlab/mirrors-mdformat
29 | rev: 0.7.9
30 | hooks:
31 | - id: mdformat
32 | args: ["--number"]
33 | additional_dependencies:
34 | - mdformat-openmmlab
35 | - mdformat_frontmatter
36 | - linkify-it-py
37 | - repo: https://gitee.com/openmmlab/mirrors-codespell
38 | rev: v2.2.1
39 | hooks:
40 | - id: codespell
41 | - repo: https://gitee.com/openmmlab/mirrors-docformatter
42 | rev: v1.3.1
43 | hooks:
44 | - id: docformatter
45 | args: ["--in-place", "--wrap-descriptions", "79"]
46 | - repo: https://gitee.com/openmmlab/mirrors-pyupgrade
47 | rev: v3.0.0
48 | hooks:
49 | - id: pyupgrade
50 | args: ["--py36-plus"]
51 | - repo: https://github.com/open-mmlab/pre-commit-hooks
52 | rev: v0.2.0
53 | hooks:
54 | - id: check-copyright
55 | args: ["mmrotate", "tests"]
56 | # - repo: https://gitee.com/openmmlab/mirrors-mypy
57 | # rev: v0.812
58 | # hooks:
59 | # - id: mypy
60 | # exclude: "docs"
61 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/PyCQA/flake8
3 | rev: 5.0.4
4 | hooks:
5 | - id: flake8
6 | - repo: https://github.com/asottile/seed-isort-config
7 | rev: v2.2.0
8 | hooks:
9 | - id: seed-isort-config
10 | - repo: https://github.com/timothycrosley/isort
11 | rev: 4.3.21
12 | hooks:
13 | - id: isort
14 | - repo: https://github.com/pre-commit/mirrors-yapf
15 | rev: v0.32.0
16 | hooks:
17 | - id: yapf
18 | - repo: https://github.com/pre-commit/pre-commit-hooks
19 | rev: v4.3.0
20 | hooks:
21 | - id: trailing-whitespace
22 | - id: check-yaml
23 | - id: end-of-file-fixer
24 | - id: requirements-txt-fixer
25 | - id: double-quote-string-fixer
26 | - id: check-merge-conflict
27 | - id: fix-encoding-pragma
28 | args: [ "--remove" ]
29 | - id: mixed-line-ending
30 | args: [ "--fix=lf" ]
31 | - repo: https://github.com/codespell-project/codespell
32 | rev: v2.2.1
33 | hooks:
34 | - id: codespell
35 | args: [ '--ignore-words-list', 'DOTA' ]
36 | - repo: https://github.com/executablebooks/mdformat
37 | rev: 0.7.9
38 | hooks:
39 | - id: mdformat
40 | args: ["--number"]
41 | additional_dependencies:
42 | - mdformat-openmmlab
43 | - mdformat_frontmatter
44 | - linkify-it-py
45 | - repo: https://github.com/myint/docformatter
46 | rev: v1.3.1
47 | hooks:
48 | - id: docformatter
49 | args: [ "--in-place", "--wrap-descriptions", "79" ]
50 | - repo: https://github.com/open-mmlab/pre-commit-hooks
51 | rev: v0.2.0
52 | hooks:
53 | - id: check-copyright
54 | args: ["mmrotate", "tests", "demo", "tools"]
55 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | formats: all
4 |
5 | python:
6 | version: 3.7
7 | install:
8 | - requirements: requirements/docs.txt
9 | - requirements: requirements/readthedocs.txt
10 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - name: "MMRotate Contributors"
5 | title: "OpenMMLab rotated object detection toolbox and benchmark"
6 | date-released: 2022-02-18
7 | url: "https://github.com/open-mmlab/mmrotate"
8 | license: Apache-2.0
9 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements/*.txt
2 | include mmrotate/.mim/model-index.yml
3 | recursive-include mmrotate/.mim/configs *.py *.yml
4 | recursive-include mmrotate/.mim/tools *.py *.sh
5 |
--------------------------------------------------------------------------------
/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | default_scope = 'mmrotate'
2 |
3 | default_hooks = dict(
4 | timer=dict(type='IterTimerHook'),
5 | logger=dict(type='LoggerHook', interval=50),
6 | param_scheduler=dict(type='ParamSchedulerHook'),
7 | checkpoint=dict(type='CheckpointHook', interval=1),
8 | sampler_seed=dict(type='DistSamplerSeedHook'),
9 | visualization=dict(type='mmdet.DetVisualizationHook'))
10 |
11 | env_cfg = dict(
12 | cudnn_benchmark=False,
13 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
14 | dist_cfg=dict(backend='nccl'),
15 | )
16 |
17 | vis_backends = [dict(type='LocalVisBackend')]
18 | visualizer = dict(
19 | type='RotLocalVisualizer', vis_backends=vis_backends, name='visualizer')
20 | log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
21 |
22 | log_level = 'INFO'
23 | load_from = None
24 | resume = False
25 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_1x.py:
--------------------------------------------------------------------------------
1 | # training schedule for 1x
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR',
10 | start_factor=1.0 / 3,
11 | by_epoch=False,
12 | begin=0,
13 | end=500),
14 | dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=12,
18 | by_epoch=True,
19 | milestones=[8, 11],
20 | gamma=0.1)
21 | ]
22 |
23 | # optimizer
24 | optim_wrapper = dict(
25 | type='OptimWrapper',
26 | optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
27 | clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_3x.py:
--------------------------------------------------------------------------------
1 | # training schedule for 3x
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR',
10 | start_factor=1.0 / 3,
11 | by_epoch=False,
12 | begin=0,
13 | end=500),
14 | dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=36,
18 | by_epoch=True,
19 | milestones=[24, 33],
20 | gamma=0.1)
21 | ]
22 |
23 | # optimizer
24 | optim_wrapper = dict(
25 | type='OptimWrapper',
26 | optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
27 | clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_40e.py:
--------------------------------------------------------------------------------
1 | # training schedule for 40e
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR',
10 | start_factor=1.0 / 3,
11 | by_epoch=False,
12 | begin=0,
13 | end=500),
14 | dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=40,
18 | by_epoch=True,
19 | milestones=[24, 32, 38],
20 | gamma=0.1)
21 | ]
22 |
23 | # optimizer
24 | optim_wrapper = dict(
25 | type='OptimWrapper',
26 | optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
27 | clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule_6x.py:
--------------------------------------------------------------------------------
1 | # training schedule for 6x
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=72, val_interval=1)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR',
10 | start_factor=1.0 / 3,
11 | by_epoch=False,
12 | begin=0,
13 | end=500),
14 | dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=72,
18 | by_epoch=True,
19 | milestones=[48, 66],
20 | gamma=0.1)
21 | ]
22 |
23 | # optimizer
24 | optim_wrapper = dict(
25 | type='OptimWrapper',
26 | optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
27 | clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/configs/cfa/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: cfa
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://openaccess.thecvf.com/content/CVPR2021/papers/Guo_Beyond_Bounding-Box_Convex-Hull_Feature_Adaptation_for_Oriented_and_Densely_Packed_CVPR_2021_paper.pdf
13 | Title: 'Beyond Bounding-Box: Convex-hull Feature Adaptation for Oriented and Densely Packed Object Detection'
14 | README: configs/cfa/README.md
15 |
16 | Models:
17 | - Name: cfa-qbox_r50_fpn_1x_dota
18 | In Collection: cfa
19 | Config: configs/cfa/cfa-qbox_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 69.63
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/cfa/cfa_r50_fpn_1x_dota_le135/cfa_r50_fpn_1x_dota_le135-aed1cbc6.pth
28 |
29 | - Name: cfa-qbox_r50_fpn_40e_dota
30 | In Collection: cfa
31 | Config: configs/cfa/cfa-qbox_r50_fpn_40e_dota.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 73.45
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/cfa/cfa_r50_fpn_40e_dota_oc/cfa_r50_fpn_40e_dota_oc-2f387232.pth
40 |
--------------------------------------------------------------------------------
/configs/convnext/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: convnext
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - AdamW
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 3090
9 | Architecture:
10 | - ConvNeXt
11 | Paper:
12 | URL: https://arxiv.org/abs/2201.03545
13 | Title: 'A ConvNet for the 2020s'
14 | README: configs/convnext/README.md
15 |
16 | Models:
17 | - Name: rotated-retinanet-rbox-le90_convnext-tiny_fpn_kld-stable_adamw-1x_dota
18 | In Collection: convnext
19 | Config: configs/convnext/rotated-retinanet-rbox-le90_convnext-tiny_fpn_kld-stable_adamw-1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 74.49
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/convnext/rotated_retinanet_obb_kld_stable_convnext_adamw_fpn_1x_dota_le90/rotated_retinanet_obb_kld_stable_convnext_adamw_fpn_1x_dota_le90-388184f6.pth
28 |
--------------------------------------------------------------------------------
/configs/convnext/rotated-retinanet-rbox-le90_convnext-tiny_fpn_kld-stable_adamw-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../kld/rotated-retinanet-rbox-le90_r50_fpn_kld-stable_adamw-1x_dota.py'
3 | ]
4 |
5 | # please install mmcls>=1.0.0rc0
6 | # import mmcls.models to trigger register_module in mmcls
7 | custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
8 | checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
9 |
10 | model = dict(
11 | backbone=dict(
12 | _delete_=True,
13 | type='mmcls.ConvNeXt',
14 | arch='tiny',
15 | out_indices=[0, 1, 2, 3],
16 | drop_path_rate=0.4,
17 | layer_scale_init_value=1.0,
18 | gap_before_final_norm=False,
19 | init_cfg=dict(
20 | type='Pretrained', checkpoint=checkpoint_file,
21 | prefix='backbone.')),
22 | neck=dict(in_channels=[96, 192, 384, 768]))
23 |
24 | lr_config = dict(warmup_iters=1000)
25 | find_unused_parameters = True
26 |
--------------------------------------------------------------------------------
/configs/csl/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: csl
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x Quadro RTX 8000
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://link.springer.com/chapter/10.1007/978-3-030-58598-3_40
13 | Title: 'Arbitrary-Oriented Object Detection with Circular Smooth Label'
14 | README: configs/csl/README.md
15 |
16 | Models:
17 | - Name: rotated-retinanet-rbox-le90_r50_fpn_csl-gaussian_amp-1x_dota
18 | In Collection: csl
19 | Config: configs/csl/rotated-retinanet-rbox-le90_r50_fpn_csl-gaussian_amp-1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 69.51
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/csl/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90-b4271aed.pth
28 |
--------------------------------------------------------------------------------
/configs/csl/rotated-retinanet-rbox-le90_r50_fpn_csl-gaussian_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = \
2 | ['../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_amp-1x_dota.py']
3 |
4 | angle_version = 'le90'
5 | model = dict(
6 | bbox_head=dict(
7 | type='AngleBranchRetinaHead',
8 | angle_coder=dict(
9 | type='CSLCoder',
10 | angle_version=angle_version,
11 | omega=4,
12 | window='gaussian',
13 | radius=3),
14 | loss_angle=dict(
15 | type='SmoothFocalLoss', gamma=2.0, alpha=0.25, loss_weight=0.8)))
16 |
--------------------------------------------------------------------------------
/configs/gliding_vertex/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: gliding_vertex
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://arxiv.org/pdf/1911.09358.pdf
13 | Title: 'Gliding Vertex on the Horizontal Bounding Box for Multi-Oriented Object Detection'
14 | README: configs/gliding_vertex/README.md
15 |
16 | Models:
17 | - Name: gliding-vertex-rbox_r50_fpn_1x_dota
18 | In Collection: gliding_vertex
19 | Config: configs/gliding_vertex/gliding-vertex-rbox_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 73.23
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/gliding_vertex/gliding_vertex_r50_fpn_1x_dota_le90/gliding_vertex_r50_fpn_1x_dota_le90-12e7423c.pth
28 |
--------------------------------------------------------------------------------
/configs/gwd/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: gwd
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://arxiv.org/pdf/2101.11952.pdf
13 | Title: 'Rethinking Rotated Object Detection with Gaussian Wasserstein Distance Loss'
14 | README: configs/gwd/README.md
15 |
16 | Models:
17 | - Name: rotated-retinanet-hbox-oc_r50_fpn_gwd_1x_dota
18 | In Collection: gwd
19 | Config: configs/gwd/rotated-retinanet-hbox-oc_r50_fpn_gwd_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 69.55
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/gwd/rotated_retinanet_hbb_gwd_r50_fpn_1x_dota_oc/rotated_retinanet_hbb_gwd_r50_fpn_1x_dota_oc-41fd7805.pth
28 |
--------------------------------------------------------------------------------
/configs/gwd/rotated-retinanet-hbox-oc_r50_fpn_gwd_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(type='GDLoss', loss_type='gwd', loss_weight=5.0)))
7 |
--------------------------------------------------------------------------------
/configs/gwd/rotated-retinanet-rbox-le135_r50_fpn_gwd_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-le135_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(type='GDLoss', loss_type='gwd', loss_weight=5.0)))
7 |
--------------------------------------------------------------------------------
/configs/gwd/rotated-retinanet-rbox-le90_r50_fpn_gwd_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
3 | ]
4 |
5 | model = dict(
6 | bbox_head=dict(
7 | reg_decoded_bbox=True,
8 | loss_bbox=dict(type='GDLoss', loss_type='gwd', loss_weight=5.0)))
9 |
--------------------------------------------------------------------------------
/configs/h2rbox/dotav15/h2rbox-le90_r50_fpn_adamw-3x_dotav15.py:
--------------------------------------------------------------------------------
1 | _base_ = './h2rbox-le90_r50_fpn_adamw-1x_dotav15.py'
2 |
3 | # training schedule for 3x
4 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=12)
5 |
6 | param_scheduler = [
7 | dict(
8 | type='LinearLR',
9 | start_factor=1.0 / 3,
10 | by_epoch=False,
11 | begin=0,
12 | end=500),
13 | dict(
14 | type='MultiStepLR',
15 | begin=0,
16 | end=36,
17 | by_epoch=True,
18 | milestones=[24, 33],
19 | gamma=0.1)
20 | ]
21 |
--------------------------------------------------------------------------------
/configs/h2rbox/dotav2/h2rbox-le90_r50_fpn_adamw-3x_dotav2.py:
--------------------------------------------------------------------------------
1 | _base_ = './h2rbox-le90_r50_fpn_adamw-1x_dotav2.py'
2 |
3 | # training schedule for 3x
4 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=12)
5 |
6 | param_scheduler = [
7 | dict(
8 | type='LinearLR',
9 | start_factor=1.0 / 3,
10 | by_epoch=False,
11 | begin=0,
12 | end=500),
13 | dict(
14 | type='MultiStepLR',
15 | begin=0,
16 | end=36,
17 | by_epoch=True,
18 | milestones=[24, 33],
19 | gamma=0.1)
20 | ]
21 |
--------------------------------------------------------------------------------
/configs/h2rbox/h2rbox-le90_r50_fpn_adamw-1x_dota-ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './h2rbox-le90_r50_fpn_adamw-1x_dota.py'
2 | data_root = '/data/nas/dataset_share/DOTA/split_ms_dota1_0/'
3 |
4 | train_dataloader = dict(dataset=dict(data_root=data_root))
5 | val_dataloader = dict(dataset=dict(data_root=data_root))
6 | test_dataloader = dict(dataset=dict(data_root=data_root))
7 |
--------------------------------------------------------------------------------
/configs/h2rbox/h2rbox-le90_r50_fpn_adamw-3x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './h2rbox-le90_r50_fpn_adamw-1x_dota.py'
2 |
3 | # training schedule for 3x
4 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=12)
5 |
6 | param_scheduler = [
7 | dict(
8 | type='LinearLR',
9 | start_factor=1.0 / 3,
10 | by_epoch=False,
11 | begin=0,
12 | end=500),
13 | dict(
14 | type='MultiStepLR',
15 | begin=0,
16 | end=36,
17 | by_epoch=True,
18 | milestones=[24, 33],
19 | gamma=0.1)
20 | ]
21 |
--------------------------------------------------------------------------------
/configs/h2rbox_v2/h2rbox_v2-le90_r50_fpn_ms_rr-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['h2rbox_v2-le90_r50_fpn-1x_dota.py']
2 |
3 | # load hbox annotations
4 | train_pipeline = [
5 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
6 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
7 | # Horizontal GTBox, (x1,y1,x2,y2)
8 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='hbox')),
9 | # Horizontal GTBox, (x,y,w,h,theta)
10 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
11 | dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
12 | dict(
13 | type='mmdet.RandomFlip',
14 | prob=0.75,
15 | direction=['horizontal', 'vertical', 'diagonal']),
16 | dict(type='RandomRotate', prob=1, angle_range=180),
17 | dict(type='mmdet.PackDetInputs')
18 | ]
19 |
20 | train_dataloader = dict(dataset=dict())
21 |
22 | data_root = 'data/split_ms_dota/'
23 |
24 | train_dataloader = dict(
25 | dataset=dict(data_root=data_root, pipeline=train_pipeline))
26 | val_dataloader = dict(dataset=dict(data_root=data_root))
27 | test_dataloader = dict(dataset=dict(data_root=data_root))
28 |
--------------------------------------------------------------------------------
/configs/h2rbox_v2/h2rbox_v2-le90_r50_fpn_rr-6x_hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = ['h2rbox_v2-le90_r50_fpn-6x_hrsc.py']
2 |
3 | # load hbox annotations
4 | train_pipeline = [
5 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
6 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
7 | dict(type='mmdet.FixShapeResize', width=800, height=800, keep_ratio=True),
8 | # Horizontal GTBox, (x1,y1,x2,y2)
9 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='hbox')),
10 | # Horizontal GTBox, (x,y,w,h,theta)
11 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
12 | dict(
13 | type='mmdet.RandomFlip',
14 | prob=0.75,
15 | direction=['horizontal', 'vertical', 'diagonal']),
16 | dict(type='RandomRotate', prob=1, angle_range=180),
17 | dict(type='mmdet.PackDetInputs')
18 | ]
19 |
20 | train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
21 |
--------------------------------------------------------------------------------
/configs/kfiou/r3det-oc_r50_fpn_kfiou-ln_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['../r3det/r3det-oc_r50_fpn_1x_dota.py']
2 |
3 | angle_version = 'oc'
4 | model = dict(
5 | bbox_head_init=dict(
6 | loss_bbox_type='kfiou',
7 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0)),
8 | bbox_head_refine=[
9 | dict(
10 | type='R3RefineHead',
11 | num_classes=15,
12 | in_channels=256,
13 | stacked_convs=4,
14 | feat_channels=256,
15 | frm_cfg=dict(
16 | type='FRM', feat_channels=256, strides=[8, 16, 32, 64, 128]),
17 | anchor_generator=dict(
18 | type='PseudoRotatedAnchorGenerator',
19 | strides=[8, 16, 32, 64, 128]),
20 | bbox_coder=dict(
21 | type='DeltaXYWHTRBBoxCoder',
22 | angle_version=angle_version,
23 | norm_factor=None,
24 | edge_swap=False,
25 | proj_xy=False,
26 | target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
27 | target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
28 | loss_cls=dict(
29 | type='mmdet.FocalLoss',
30 | use_sigmoid=True,
31 | gamma=2.0,
32 | alpha=0.25,
33 | loss_weight=1.0),
34 | loss_bbox_type='kfiou',
35 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0))
36 | ])
37 |
--------------------------------------------------------------------------------
/configs/kfiou/roi-trans-le90_r50_fpn_kfiou-ln_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../roi_trans/roi-trans-le90_r50_fpn_1x_dota.py'
2 |
3 | angle_version = 'le90'
4 | model = dict(
5 | roi_head=dict(bbox_head=[
6 | dict(
7 | type='RotatedShared2FCBBoxHead',
8 | predict_box_type='rbox',
9 | in_channels=256,
10 | fc_out_channels=1024,
11 | roi_feat_size=7,
12 | num_classes=15,
13 | reg_predictor_cfg=dict(type='mmdet.Linear'),
14 | cls_predictor_cfg=dict(type='mmdet.Linear'),
15 | bbox_coder=dict(
16 | type='DeltaXYWHTHBBoxCoder',
17 | angle_version=angle_version,
18 | norm_factor=2,
19 | edge_swap=True,
20 | target_means=(.0, .0, .0, .0, .0),
21 | target_stds=(0.1, 0.1, 0.2, 0.2, 0.1),
22 | use_box_type=True),
23 | reg_class_agnostic=True,
24 | loss_cls=dict(
25 | type='mmdet.CrossEntropyLoss',
26 | use_sigmoid=False,
27 | loss_weight=1.0),
28 | loss_bbox_type='kfiou',
29 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0)),
30 | dict(
31 | type='RotatedShared2FCBBoxHead',
32 | predict_box_type='rbox',
33 | in_channels=256,
34 | fc_out_channels=1024,
35 | roi_feat_size=7,
36 | num_classes=15,
37 | reg_predictor_cfg=dict(type='mmdet.Linear'),
38 | cls_predictor_cfg=dict(type='mmdet.Linear'),
39 | bbox_coder=dict(
40 | type='DeltaXYWHTRBBoxCoder',
41 | angle_version=angle_version,
42 | norm_factor=None,
43 | edge_swap=True,
44 | proj_xy=True,
45 | target_means=[0., 0., 0., 0., 0.],
46 | target_stds=[0.05, 0.05, 0.1, 0.1, 0.05]),
47 | reg_class_agnostic=False,
48 | loss_cls=dict(
49 | type='mmdet.CrossEntropyLoss',
50 | use_sigmoid=False,
51 | loss_weight=1.0),
52 | loss_bbox_type='kfiou',
53 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0))
54 | ]))
55 |
--------------------------------------------------------------------------------
/configs/kfiou/rotated-retinanet-hbox-le135_r50_fpn_kfiou_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-le135_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | type='RotatedRetinaHead',
6 | loss_bbox_type='kfiou',
7 | loss_bbox=dict(type='KFLoss', loss_weight=5.0)),
8 | train_cfg=dict(
9 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
10 |
--------------------------------------------------------------------------------
/configs/kfiou/rotated-retinanet-hbox-le90_r50_fpn_kfiou_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | type='RotatedRetinaHead',
6 | loss_bbox_type='kfiou',
7 | loss_bbox=dict(type='KFLoss', loss_weight=5.0)),
8 | train_cfg=dict(
9 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
10 |
--------------------------------------------------------------------------------
/configs/kfiou/rotated-retinanet-hbox-oc_r50_fpn_kfiou_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-oc_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | type='RotatedRetinaHead',
6 | loss_bbox_type='kfiou',
7 | loss_bbox=dict(type='KFLoss', loss_weight=5.0)),
8 | train_cfg=dict(
9 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
10 |
--------------------------------------------------------------------------------
/configs/kfiou/s2anet-le135_r50_fpn_kfiou-ln_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../s2anet/s2anet-le135_r50_fpn_1x_dota.py'
2 |
3 | angle_version = 'le135'
4 | model = dict(
5 | bbox_head_init=dict(
6 | loss_bbox_type='kfiou',
7 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0)),
8 | bbox_head_refine=[
9 | dict(
10 | type='S2ARefineHead',
11 | num_classes=15,
12 | in_channels=256,
13 | stacked_convs=2,
14 | feat_channels=256,
15 | frm_cfg=dict(
16 | type='AlignConv',
17 | feat_channels=256,
18 | kernel_size=3,
19 | strides=[8, 16, 32, 64, 128]),
20 | anchor_generator=dict(
21 | type='PseudoRotatedAnchorGenerator',
22 | strides=[8, 16, 32, 64, 128]),
23 | bbox_coder=dict(
24 | type='DeltaXYWHTRBBoxCoder',
25 | angle_version=angle_version,
26 | norm_factor=1,
27 | edge_swap=False,
28 | proj_xy=True,
29 | target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
30 | target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
31 | loss_cls=dict(
32 | type='mmdet.FocalLoss',
33 | use_sigmoid=True,
34 | gamma=2.0,
35 | alpha=0.25,
36 | loss_weight=1.0),
37 | loss_bbox_type='kfiou',
38 | loss_bbox=dict(type='KFLoss', fun='ln', loss_weight=5.0))
39 | ])
40 |
--------------------------------------------------------------------------------
/configs/kld/r3det-oc_r50_fpn_kld-stable_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['../r3det/r3det-oc_r50_fpn_1x_dota.py']
2 |
3 | angle_version = 'oc'
4 | model = dict(
5 | bbox_head_init=dict(
6 | reg_decoded_bbox=True,
7 | loss_bbox=dict(
8 | _delete_=True,
9 | type='GDLoss',
10 | loss_type='kld',
11 | fun='log1p',
12 | tau=1.0,
13 | sqrt=False,
14 | loss_weight=5.0)),
15 | bbox_head_refine=[
16 | dict(
17 | type='R3RefineHead',
18 | num_classes=15,
19 | in_channels=256,
20 | stacked_convs=4,
21 | feat_channels=256,
22 | frm_cfg=dict(
23 | type='FRM', feat_channels=256, strides=[8, 16, 32, 64, 128]),
24 | anchor_generator=dict(
25 | type='PseudoRotatedAnchorGenerator',
26 | strides=[8, 16, 32, 64, 128]),
27 | bbox_coder=dict(
28 | type='DeltaXYWHTRBBoxCoder',
29 | angle_version=angle_version,
30 | norm_factor=None,
31 | edge_swap=False,
32 | proj_xy=False,
33 | target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
34 | target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
35 | loss_cls=dict(
36 | type='mmdet.FocalLoss',
37 | use_sigmoid=True,
38 | gamma=2.0,
39 | alpha=0.25,
40 | loss_weight=1.0),
41 | reg_decoded_bbox=True,
42 | loss_bbox=dict(
43 | type='GDLoss',
44 | loss_type='kld',
45 | fun='log1p',
46 | tau=1.0,
47 | sqrt=False,
48 | loss_weight=5.0))
49 | ])
50 |
--------------------------------------------------------------------------------
/configs/kld/r3det-oc_r50_fpn_kld_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['../r3det/r3det-oc_r50_fpn_1x_dota.py']
2 |
3 | angle_version = 'oc'
4 | model = dict(
5 | bbox_head_init=dict(
6 | reg_decoded_bbox=True,
7 | loss_bbox=dict(
8 | _delete_=True,
9 | type='GDLoss_v1',
10 | loss_type='kld',
11 | fun='log1p',
12 | tau=1.0,
13 | loss_weight=1.0)),
14 | bbox_head_refine=[
15 | dict(
16 | type='R3RefineHead',
17 | num_classes=15,
18 | in_channels=256,
19 | stacked_convs=4,
20 | feat_channels=256,
21 | frm_cfg=dict(
22 | type='FRM', feat_channels=256, strides=[8, 16, 32, 64, 128]),
23 | anchor_generator=dict(
24 | type='PseudoRotatedAnchorGenerator',
25 | strides=[8, 16, 32, 64, 128]),
26 | bbox_coder=dict(
27 | type='DeltaXYWHTRBBoxCoder',
28 | angle_version=angle_version,
29 | norm_factor=None,
30 | edge_swap=False,
31 | proj_xy=False,
32 | target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
33 | target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
34 | loss_cls=dict(
35 | type='mmdet.FocalLoss',
36 | use_sigmoid=True,
37 | gamma=2.0,
38 | alpha=0.25,
39 | loss_weight=1.0),
40 | reg_decoded_bbox=True,
41 | loss_bbox=dict(
42 | type='GDLoss_v1',
43 | loss_type='kld',
44 | fun='log1p',
45 | tau=1.0,
46 | loss_weight=1.0))
47 | ])
48 |
--------------------------------------------------------------------------------
/configs/kld/r3det-tiny-oc_r50_fpn_kld_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['../r3det/r3det-tiny-oc_r50_fpn_1x_dota.py']
2 |
3 | angle_version = 'oc'
4 | model = dict(
5 | bbox_head_init=dict(
6 | reg_decoded_bbox=True,
7 | loss_bbox=dict(
8 | _delete_=True,
9 | type='GDLoss_v1',
10 | loss_type='kld',
11 | fun='log1p',
12 | tau=1.0,
13 | loss_weight=1.0)),
14 | bbox_head_refine=[
15 | dict(
16 | type='R3RefineHead',
17 | num_classes=15,
18 | in_channels=256,
19 | stacked_convs=2,
20 | feat_channels=256,
21 | frm_cfg=dict(
22 | type='FRM', feat_channels=256, strides=[8, 16, 32, 64, 128]),
23 | anchor_generator=dict(
24 | type='PseudoRotatedAnchorGenerator',
25 | strides=[8, 16, 32, 64, 128]),
26 | bbox_coder=dict(
27 | type='DeltaXYWHTRBBoxCoder',
28 | angle_version=angle_version,
29 | norm_factor=None,
30 | edge_swap=False,
31 | proj_xy=False,
32 | target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
33 | target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
34 | loss_cls=dict(
35 | type='mmdet.FocalLoss',
36 | use_sigmoid=True,
37 | gamma=2.0,
38 | alpha=0.25,
39 | loss_weight=1.0),
40 | reg_decoded_bbox=True,
41 | loss_bbox=dict(
42 | type='GDLoss_v1',
43 | loss_type='kld',
44 | fun='log1p',
45 | tau=1.0,
46 | loss_weight=1.0))
47 | ])
48 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-hbox-oc_r50_fpn_kld-stable_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(
7 | _delete_=True,
8 | type='GDLoss',
9 | loss_type='kld',
10 | fun='log1p',
11 | tau=1,
12 | sqrt=False,
13 | loss_weight=5.5)))
14 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-hbox-oc_r50_fpn_kld-stable_rr-6x_hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_rr-6x_hrsc.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(
7 | _delete_=True,
8 | type='GDLoss',
9 | loss_type='kld',
10 | fun='log1p',
11 | tau=1,
12 | sqrt=False,
13 | loss_weight=5.5)))
14 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-hbox-oc_r50_fpn_kld_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(
7 | _delete_=True,
8 | type='GDLoss_v1',
9 | loss_type='kld',
10 | fun='log1p',
11 | tau=1,
12 | loss_weight=1.0)))
13 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-rbox-le90_r50_fpn_kld-stable_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(
7 | _delete_=True,
8 | type='GDLoss',
9 | loss_type='kld',
10 | fun='log1p',
11 | tau=1,
12 | sqrt=False,
13 | loss_weight=5.5)))
14 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-rbox-le90_r50_fpn_kld-stable_adamw-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated-retinanet-rbox-le90_r50_fpn_kld-stable_1x_dota.py'
2 |
3 | optim_wrapper = dict(
4 | optimizer=dict(
5 | _delete_=True,
6 | type='AdamW',
7 | lr=0.0001,
8 | betas=(0.9, 0.999),
9 | weight_decay=0.05))
10 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-rbox-le90_r50_fpn_kld-stable_rr-6x_hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_rr-6x_hrsc.py'
3 | ]
4 |
5 | model = dict(
6 | bbox_head=dict(
7 | reg_decoded_bbox=True,
8 | loss_bbox=dict(
9 | _delete_=True,
10 | type='GDLoss',
11 | loss_type='kld',
12 | fun='log1p',
13 | tau=1,
14 | sqrt=False,
15 | loss_weight=1.0)))
16 |
--------------------------------------------------------------------------------
/configs/kld/rotated-retinanet-rbox-le90_r50_fpn_kld_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | reg_decoded_bbox=True,
6 | loss_bbox=dict(
7 | _delete_=True,
8 | type='GDLoss_v1',
9 | loss_type='kld',
10 | fun='log1p',
11 | tau=1,
12 | loss_weight=1.0)))
13 |
--------------------------------------------------------------------------------
/configs/oriented_rcnn/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: oriented_rcnn
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Oriented_R-CNN_for_Object_Detection_ICCV_2021_paper.pdf
13 | Title: 'Oriented R-CNN for Object Detection'
14 | README: configs/oriented_rcnn/README.md
15 |
16 | Models:
17 | - Name: oriented-rcnn-le90_r50_fpn_amp-1x_dota
18 | In Collection: oriented_rcnn
19 | Config: configs/oriented_rcnn/oriented-rcnn-le90_r50_fpn_amp-1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 75.63
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/oriented_rcnn/oriented_rcnn_r50_fpn_fp16_1x_dota_le90/oriented_rcnn_r50_fpn_fp16_1x_dota_le90-57c88621.pth
28 |
29 | - Name: oriented-rcnn-le90_r50_fpn_1x_dota
30 | In Collection: oriented_rcnn
31 | Config: configs/oriented_rcnn/oriented-rcnn-le90_r50_fpn_1x_dota.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 75.69
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/oriented_rcnn/oriented_rcnn_r50_fpn_1x_dota_le90/oriented_rcnn_r50_fpn_1x_dota_le90-6d2b2ce0.pth
40 |
--------------------------------------------------------------------------------
/configs/oriented_rcnn/oriented-rcnn-le90_r50_fpn_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './oriented-rcnn-le90_r50_fpn_1x_dota.py'
2 |
3 | optim_wrapper = dict(type='AmpOptimWrapper')
4 |
--------------------------------------------------------------------------------
/configs/oriented_reppoints/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: oriented_reppoints
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 4x GeForce RTX 2080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://openaccess.thecvf.com/content/CVPR2022/papers/Li_Oriented_RepPoints_for_Aerial_Object_Detection_CVPR_2022_paper.pdf
13 | Title: 'Oriented RepPoints for Aerial Object Detection'
14 | README: configs/oriented_reppoints/README.md
15 |
16 | Models:
17 | - Name: oriented-reppoints-qbox_r50_fpn_1x_dota
18 | In Collection: oriented_reppoints
19 | Config: configs/oriented_reppoints/oriented-reppoints-qbox_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 71.94
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/oriented_reppoints/oriented_reppoints_r50_fpn_1x_dota_le135/oriented_reppoints_r50_fpn_1x_dota_le135-ef072de9.pth
28 |
29 | - Name: oriented-reppoints-qbox_r50_fpn_40e_dota-ms
30 | In Collection: oriented_reppoints
31 | Config: configs/oriented_reppoints/oriented-reppoints-qbox_r50_fpn_40e_dota-ms.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 75.21
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/oriented_reppoints/oriented_reppoints_r50_fpn_40e_dota_ms_le135/oriented_reppoints_r50_fpn_40e_dota_ms_le135-bb0323fd.pth
40 |
--------------------------------------------------------------------------------
/configs/psc/rotated-fcos-hbox-le90_r50_fpn_psc-dual_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_fcos/rotated-fcos-le90_r50_fpn_1x_dota.py'
2 |
3 | angle_version = {{_base_.angle_version}}
4 |
5 | # model settings
6 | model = dict(
7 | bbox_head=dict(
8 | use_hbbox_loss=True,
9 | scale_angle=False,
10 | angle_coder=dict(
11 | type='PSCCoder',
12 | angle_version=angle_version,
13 | dual_freq=True,
14 | num_step=3),
15 | loss_cls=dict(
16 | type='mmdet.FocalLoss',
17 | use_sigmoid=True,
18 | gamma=2.0,
19 | alpha=0.25,
20 | loss_weight=1.0),
21 | loss_bbox=dict(type='mmdet.IoULoss', loss_weight=1.0),
22 | loss_centerness=dict(
23 | type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
24 | loss_angle=dict(_delete_=True, type='mmdet.L1Loss', loss_weight=0.1),
25 | ))
26 |
--------------------------------------------------------------------------------
/configs/psc/rotated-fcos-hbox-le90_r50_fpn_psc_rr-6x_hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = '../rotated_fcos/rotated-fcos-le90_r50_fpn_rr-6x_hrsc.py'
2 |
3 | angle_version = {{_base_.angle_version}}
4 |
5 | # model settings
6 | model = dict(
7 | bbox_head=dict(
8 | use_hbbox_loss=True,
9 | scale_angle=False,
10 | angle_coder=dict(
11 | type='PSCCoder',
12 | angle_version=angle_version,
13 | dual_freq=False,
14 | num_step=3),
15 | loss_cls=dict(
16 | type='mmdet.FocalLoss',
17 | use_sigmoid=True,
18 | gamma=2.0,
19 | alpha=0.25,
20 | loss_weight=1.0),
21 | loss_bbox=dict(type='mmdet.IoULoss', loss_weight=1.0),
22 | loss_centerness=dict(
23 | type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
24 | loss_angle=dict(_delete_=True, type='mmdet.L1Loss', loss_weight=0.2),
25 | ))
26 |
--------------------------------------------------------------------------------
/configs/psc/rotated-retinanet-rbox-le90_r50_fpn_psc-dual_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = \
2 | ['../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_amp-1x_dota.py']
3 |
4 | angle_version = 'le90'
5 | model = dict(
6 | bbox_head=dict(
7 | anchor_generator=dict(angle_version=None),
8 | type='AngleBranchRetinaHead',
9 | use_normalized_angle_feat=True,
10 | angle_coder=dict(
11 | type='PSCCoder',
12 | angle_version=angle_version,
13 | dual_freq=True,
14 | num_step=3),
15 | loss_cls=dict(
16 | type='mmdet.FocalLoss',
17 | use_sigmoid=True,
18 | gamma=2.0,
19 | alpha=0.25,
20 | loss_weight=1.0),
21 | loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.5),
22 | loss_angle=dict(type='mmdet.L1Loss', loss_weight=0.2)))
23 |
--------------------------------------------------------------------------------
/configs/psc/rotated-retinanet-rbox-le90_r50_fpn_psc_rr-6x_hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = \
2 | ['../rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_rr-6x_hrsc.py']
3 |
4 | angle_version = 'le90'
5 | model = dict(
6 | bbox_head=dict(
7 | anchor_generator=dict(angle_version=None),
8 | type='AngleBranchRetinaHead',
9 | use_normalized_angle_feat=True,
10 | angle_coder=dict(
11 | type='PSCCoder',
12 | angle_version=angle_version,
13 | dual_freq=False,
14 | num_step=3,
15 | thr_mod=0.0),
16 | loss_cls=dict(
17 | type='mmdet.FocalLoss',
18 | use_sigmoid=True,
19 | gamma=2.0,
20 | alpha=0.25,
21 | loss_weight=1.0),
22 | loss_bbox=dict(type='mmdet.L1Loss', loss_weight=0.7),
23 | loss_angle=dict(type='mmdet.L1Loss', loss_weight=0.6)))
24 |
--------------------------------------------------------------------------------
/configs/r3det/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: r3det
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://arxiv.org/pdf/1908.05612.pdf
13 | Title: 'R3Det: Refined Single-Stage Detector with Feature Refinement for Rotating Object'
14 | README: configs/r3det/README.md
15 |
16 | Models:
17 | - Name: r3det-oc_r50_fpn_1x_dota
18 | In Collection: r3det
19 | Config: configs/r3det/r3det-oc_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 69.80
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/r3det/r3det_r50_fpn_1x_dota_oc/r3det_r50_fpn_1x_dota_oc-b1fb045c.pth
28 |
29 | - Name: r3det-tiny-oc_r50_fpn_1x_dota
30 | In Collection: r3det
31 | Config: configs/r3det/r3det-tiny-oc_r50_fpn_1x_dota.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 70.18
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/r3det/r3det_tiny_r50_fpn_1x_dota_oc/r3det_tiny_r50_fpn_1x_dota_oc-c98a616c.pth
40 |
--------------------------------------------------------------------------------
/configs/redet/redet-le90_re50_refpn_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './redet-le90_re50_refpn_1x_dota.py'
2 |
3 | optim_wrapper = dict(type='AmpOptimWrapper')
4 |
--------------------------------------------------------------------------------
/configs/roi_trans/roi-trans-le90_r50_fpn_1x_dota-ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './roi-trans-le90_r50_fpn_rr-1x_dota-ms.py'
2 |
3 | train_pipeline = [
4 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
5 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
6 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
7 | dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
8 | dict(
9 | type='mmdet.RandomFlip',
10 | prob=0.75,
11 | direction=['horizontal', 'vertical', 'diagonal']),
12 | dict(type='mmdet.PackDetInputs')
13 | ]
14 |
15 | train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
16 |
--------------------------------------------------------------------------------
/configs/roi_trans/roi-trans-le90_r50_fpn_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './roi-trans-le90_r50_fpn_1x_dota.py'
2 |
3 | optim_wrapper = dict(type='AmpOptimWrapper')
4 |
--------------------------------------------------------------------------------
/configs/rotated_atss/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: rotated_atss
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://arxiv.org/abs/1912.02424
13 | Title: 'Bridging the Gap Between Anchor-Based and Anchor-Free Detection via Adaptive Training Sample Selection'
14 | README: configs/rotated_atss/README.md
15 |
16 | Models:
17 | - Name: rotated_atss_hbb_r50_fpn_1x_dota_oc
18 | In Collection: rotated_atss
19 | Config: configs/rotated_atss/rotated_atss_hbb_r50_fpn_1x_dota_oc.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 65.59
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/rotated_atss/rotated_atss_hbb_r50_fpn_1x_dota_oc/rotated_atss_hbb_r50_fpn_1x_dota_oc-eaa94033.pth
28 |
29 | - Name: rotated_atss_obb_r50_fpn_1x_dota_le90
30 | In Collection: rotated_atss
31 | Config: configs/rotated_atss/rotated_atss_obb_r50_fpn_1x_dota_le90.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 70.64
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/rotated_atss/rotated_atss_obb_r50_fpn_1x_dota_le90/rotated_atss_obb_r50_fpn_1x_dota_le90-e029ca06.pth
40 |
41 | - Name: rotated_atss_obb_r50_fpn_1x_dota_le135
42 | In Collection: rotated_atss
43 | Config: configs/rotated_atss/rotated_atss_obb_r50_fpn_1x_dota_le135.py
44 | Metadata:
45 | Training Data: DOTAv1.0
46 | Results:
47 | - Task: Oriented Object Detection
48 | Dataset: DOTAv1.0
49 | Metrics:
50 | mAP: 72.29
51 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/rotated_atss/rotated_atss_obb_r50_fpn_1x_dota_le135/rotated_atss_obb_r50_fpn_1x_dota_le135-eab7bc12.pth
52 |
--------------------------------------------------------------------------------
/configs/rotated_faster_rcnn/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: rotated_faster_rcnn
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://papers.nips.cc/paper/2015/file/14bfa6bb14875e45bba028a21ed38046-Paper.pdf
13 | Title: 'Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks'
14 | README: configs/rotated_faster_rcnn/README.md
15 |
16 | Models:
17 | - Name: rotated-faster-rcnn-le90_r50_fpn_1x_dota
18 | In Collection: rotated_faster_rcnn
19 | Config: configs/rotated_faster_rcnn/rotated-faster-rcnn-le90_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 73.40
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/rotated_faster_rcnn/rotated_faster_rcnn_r50_fpn_1x_dota_le90/rotated_faster_rcnn_r50_fpn_1x_dota_le90-0393aa5c.pth
28 |
--------------------------------------------------------------------------------
/configs/rotated_fcos/rotated-fcos-hbox-le90_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-fcos-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | use_hbbox_loss=True,
6 | scale_angle=True,
7 | angle_coder=dict(type='PseudoAngleCoder'),
8 | loss_angle=dict(_delete_=True, type='mmdet.L1Loss', loss_weight=0.2),
9 | loss_bbox=dict(type='mmdet.IoULoss', loss_weight=1.0),
10 | ))
11 |
--------------------------------------------------------------------------------
/configs/rotated_fcos/rotated-fcos-hbox-le90_r50_fpn_csl-gaussian_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-fcos-le90_r50_fpn_1x_dota.py'
2 |
3 | angle_version = {{_base_.angle_version}}
4 |
5 | # model settings
6 | model = dict(
7 | bbox_head=dict(
8 | use_hbbox_loss=True,
9 | scale_angle=False,
10 | angle_coder=dict(
11 | type='CSLCoder',
12 | angle_version=angle_version,
13 | omega=1,
14 | window='gaussian',
15 | radius=1),
16 | loss_angle=dict(
17 | _delete_=True,
18 | type='SmoothFocalLoss',
19 | gamma=2.0,
20 | alpha=0.25,
21 | loss_weight=0.2),
22 | loss_bbox=dict(type='mmdet.IoULoss', loss_weight=1.0),
23 | ))
24 |
--------------------------------------------------------------------------------
/configs/rotated_fcos/rotated-fcos-le90_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/datasets/dota.py', '../_base_/schedules/schedule_1x.py',
3 | '../_base_/default_runtime.py'
4 | ]
5 | angle_version = 'le90'
6 |
7 | # model settings
8 | model = dict(
9 | type='mmdet.FCOS',
10 | data_preprocessor=dict(
11 | type='mmdet.DetDataPreprocessor',
12 | mean=[123.675, 116.28, 103.53],
13 | std=[58.395, 57.12, 57.375],
14 | bgr_to_rgb=True,
15 | pad_size_divisor=32,
16 | boxtype2tensor=False),
17 | backbone=dict(
18 | type='mmdet.ResNet',
19 | depth=50,
20 | num_stages=4,
21 | out_indices=(0, 1, 2, 3),
22 | frozen_stages=1,
23 | norm_cfg=dict(type='BN', requires_grad=True),
24 | norm_eval=True,
25 | style='pytorch',
26 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
27 | neck=dict(
28 | type='mmdet.FPN',
29 | in_channels=[256, 512, 1024, 2048],
30 | out_channels=256,
31 | start_level=1,
32 | add_extra_convs='on_output',
33 | num_outs=5,
34 | relu_before_extra_convs=True),
35 | bbox_head=dict(
36 | type='RotatedFCOSHead',
37 | num_classes=15,
38 | in_channels=256,
39 | stacked_convs=4,
40 | feat_channels=256,
41 | strides=[8, 16, 32, 64, 128],
42 | center_sampling=True,
43 | center_sample_radius=1.5,
44 | norm_on_bbox=True,
45 | centerness_on_reg=True,
46 | use_hbbox_loss=False,
47 | scale_angle=True,
48 | bbox_coder=dict(
49 | type='DistanceAnglePointCoder', angle_version=angle_version),
50 | loss_cls=dict(
51 | type='mmdet.FocalLoss',
52 | use_sigmoid=True,
53 | gamma=2.0,
54 | alpha=0.25,
55 | loss_weight=1.0),
56 | loss_bbox=dict(type='RotatedIoULoss', loss_weight=1.0),
57 | loss_angle=None,
58 | loss_centerness=dict(
59 | type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
60 | # training and testing settings
61 | train_cfg=None,
62 | test_cfg=dict(
63 | nms_pre=2000,
64 | min_bbox_size=0,
65 | score_thr=0.05,
66 | nms=dict(type='nms_rotated', iou_threshold=0.1),
67 | max_per_img=2000))
68 |
--------------------------------------------------------------------------------
/configs/rotated_fcos/rotated-fcos-le90_r50_fpn_kld_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-fcos-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | bbox_head=dict(
5 | loss_bbox=dict(
6 | _delete_=True,
7 | type='GDLoss_v1',
8 | loss_type='kld',
9 | fun='log1p',
10 | tau=1,
11 | loss_weight=1.0)))
12 |
--------------------------------------------------------------------------------
/configs/rotated_reppoints/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: rotated_reppoints
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://arxiv.org/pdf/1904.11490.pdf
13 | Title: 'RepPoints: Point Set Representation for Object Detection'
14 | README: configs/rotated_reppoints/README.md
15 |
16 | Models:
17 | - Name: rotated-reppoints-qbox_r50_fpn_1x_dota
18 | In Collection: rotated_reppoints
19 | Config: configs/rotated_reppoints/rotated-reppoints-qbox_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 59.44
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/rotated_reppoints/rotated_reppoints_r50_fpn_1x_dota_oc/rotated_reppoints_r50_fpn_1x_dota_oc-d38ce217.pth
28 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-hbox-le135_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-retinanet-rbox-le135_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | train_cfg=dict(
5 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
6 |
7 | train_pipeline = [
8 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
9 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
10 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
11 | dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
12 | dict(type='mmdet.RandomFlip', prob=0.5),
13 | dict(type='mmdet.PackDetInputs')
14 | ]
15 |
16 | train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
17 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-hbox-le90_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | train_cfg=dict(
5 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
6 |
7 | train_pipeline = [
8 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
9 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
10 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
11 | dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
12 | dict(type='mmdet.RandomFlip', prob=0.5),
13 | dict(type='mmdet.PackDetInputs')
14 | ]
15 |
16 | train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
17 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-hbox-oc_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-retinanet-rbox-oc_r50_fpn_1x_dota.py'
2 |
3 | model = dict(
4 | train_cfg=dict(
5 | assigner=dict(iou_calculator=dict(type='FakeRBboxOverlaps2D'))))
6 |
7 | train_pipeline = [
8 | dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
9 | dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
10 | dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
11 | dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
12 | dict(type='mmdet.RandomFlip', prob=0.5),
13 | dict(type='mmdet.PackDetInputs')
14 | ]
15 |
16 | train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
17 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-rbox-le135_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | angle_version = 'le135'
4 |
5 | model = dict(
6 | bbox_head=dict(
7 | anchor_generator=dict(angle_version=angle_version),
8 | bbox_coder=dict(
9 | angle_version=angle_version,
10 | norm_factor=1,
11 | edge_swap=False,
12 | proj_xy=True)))
13 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py']
2 |
3 | optim_wrapper = dict(type='AmpOptimWrapper')
4 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-rbox-oc_r50_fpn_1x_dior.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-retinanet-rbox-le90_r50_fpn_1x_dior.py'
2 |
3 | angle_version = 'oc'
4 |
5 | model = dict(
6 | bbox_head=dict(
7 | anchor_generator=dict(angle_version=angle_version),
8 | bbox_coder=dict(
9 | angle_version=angle_version,
10 | norm_factor=None,
11 | edge_swap=False,
12 | proj_xy=False)))
13 |
--------------------------------------------------------------------------------
/configs/rotated_retinanet/rotated-retinanet-rbox-oc_r50_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = 'rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py'
2 |
3 | angle_version = 'oc'
4 |
5 | model = dict(
6 | bbox_head=dict(
7 | anchor_generator=dict(angle_version=angle_version),
8 | bbox_coder=dict(
9 | angle_version=angle_version,
10 | norm_factor=None,
11 | edge_swap=False,
12 | proj_xy=False)))
13 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | default_scope = 'mmrotate'
2 |
3 | default_hooks = dict(
4 | timer=dict(type='IterTimerHook'),
5 | logger=dict(type='LoggerHook', interval=50),
6 | param_scheduler=dict(type='ParamSchedulerHook'),
7 | checkpoint=dict(type='CheckpointHook', interval=12, max_keep_ckpts=3),
8 | sampler_seed=dict(type='DistSamplerSeedHook'),
9 | visualization=dict(type='mmdet.DetVisualizationHook'))
10 |
11 | env_cfg = dict(
12 | cudnn_benchmark=False,
13 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
14 | dist_cfg=dict(backend='nccl'),
15 | )
16 |
17 | vis_backends = [dict(type='LocalVisBackend')]
18 | visualizer = dict(
19 | type='RotLocalVisualizer', vis_backends=vis_backends, name='visualizer')
20 | log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
21 |
22 | log_level = 'INFO'
23 | load_from = None
24 | resume = False
25 |
26 | custom_hooks = [
27 | dict(type='mmdet.NumClassCheckHook'),
28 | dict(
29 | type='EMAHook',
30 | ema_type='mmdet.ExpMomentumEMA',
31 | momentum=0.0002,
32 | update_buffers=True,
33 | priority=49)
34 | ]
35 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/_base_/schedule_3x.py:
--------------------------------------------------------------------------------
1 | max_epochs = 3 * 12
2 | base_lr = 0.004 / 16
3 | interval = 12
4 |
5 | train_cfg = dict(
6 | type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=interval)
7 | val_cfg = dict(type='ValLoop')
8 | test_cfg = dict(type='TestLoop')
9 |
10 | # learning rate
11 | param_scheduler = [
12 | dict(
13 | type='LinearLR',
14 | start_factor=1.0e-5,
15 | by_epoch=False,
16 | begin=0,
17 | end=1000),
18 | dict(
19 | type='CosineAnnealingLR',
20 | eta_min=base_lr * 0.05,
21 | begin=max_epochs // 2,
22 | end=max_epochs,
23 | T_max=max_epochs // 2,
24 | by_epoch=True,
25 | convert_to_iter_based=True),
26 | ]
27 |
28 | # optimizer
29 | optim_wrapper = dict(
30 | type='OptimWrapper',
31 | optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
32 | paramwise_cfg=dict(
33 | norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
34 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota_ms.py'
2 |
3 | coco_ckpt = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_l_8xb32-300e_coco/rtmdet_l_8xb32-300e_coco_20220719_112030-5a0be7c4.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | init_cfg=dict(
8 | type='Pretrained', prefix='backbone.', checkpoint=coco_ckpt)),
9 | neck=dict(
10 | init_cfg=dict(type='Pretrained', prefix='neck.',
11 | checkpoint=coco_ckpt)),
12 | bbox_head=dict(
13 | init_cfg=dict(
14 | type='Pretrained', prefix='bbox_head.', checkpoint=coco_ckpt)))
15 |
16 | # batch_size = (2 GPUs) x (4 samples per GPU) = 8
17 | train_dataloader = dict(batch_size=4, num_workers=4)
18 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.67,
8 | widen_factor=0.75,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
12 | bbox_head=dict(
13 | in_channels=192,
14 | feat_channels=192,
15 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0)))
16 |
17 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
18 | train_dataloader = dict(batch_size=8, num_workers=8)
19 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota_ms.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.67,
8 | widen_factor=0.75,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
12 | bbox_head=dict(
13 | in_channels=192,
14 | feat_channels=192,
15 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0)))
16 |
17 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
18 | train_dataloader = dict(batch_size=8, num_workers=8)
19 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.33,
8 | widen_factor=0.5,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
12 | bbox_head=dict(
13 | in_channels=128,
14 | feat_channels=128,
15 | exp_on_reg=False,
16 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
17 | ))
18 |
19 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
20 | train_dataloader = dict(batch_size=8, num_workers=8)
21 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota_ms.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.33,
8 | widen_factor=0.5,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
12 | bbox_head=dict(
13 | in_channels=128,
14 | feat_channels=128,
15 | exp_on_reg=False,
16 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
17 | ))
18 |
19 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
20 | train_dataloader = dict(batch_size=8, num_workers=8)
21 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.167,
8 | widen_factor=0.375,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
12 | bbox_head=dict(
13 | in_channels=96,
14 | feat_channels=96,
15 | exp_on_reg=False,
16 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
17 | ))
18 |
19 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
20 | train_dataloader = dict(batch_size=8, num_workers=8)
21 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-3x-dota_ms.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.167,
8 | widen_factor=0.375,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
12 | bbox_head=dict(
13 | in_channels=96,
14 | feat_channels=96,
15 | exp_on_reg=False,
16 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
17 | ))
18 |
19 | # batch_size = (1 GPU) x (8 samples per GPU) = 8
20 | train_dataloader = dict(batch_size=8, num_workers=8)
21 |
--------------------------------------------------------------------------------
/configs/rotated_rtmdet/rotated_rtmdet_tiny-9x-hrsc.py:
--------------------------------------------------------------------------------
1 | _base_ = './rotated_rtmdet_l-9x-hrsc.py'
2 |
3 | checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
4 |
5 | model = dict(
6 | backbone=dict(
7 | deepen_factor=0.167,
8 | widen_factor=0.375,
9 | init_cfg=dict(
10 | type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
11 | neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
12 | bbox_head=dict(
13 | in_channels=96,
14 | feat_channels=96,
15 | exp_on_reg=False,
16 | loss_bbox=dict(type='RotatedIoULoss', mode='linear', loss_weight=2.0),
17 | ))
18 |
--------------------------------------------------------------------------------
/configs/s2anet/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: s2anet
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 | Paper:
12 | URL: https://ieeexplore.ieee.org/document/9377550
13 | Title: 'Align Deep Features for Oriented Object Detection'
14 | README: configs/s2anet/README.md
15 |
16 | Models:
17 | - Name: s2anet-le135_r50_fpn_1x_dota
18 | In Collection: s2anet
19 | Config: configs/s2anet/s2anet-le135_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 73.91
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/s2anet/s2anet_r50_fpn_1x_dota_le135/s2anet_r50_fpn_1x_dota_le135-5dfcf396.pth
28 |
29 | - Name: s2anet-le135_r50_fpn_amp-1x_dota
30 | In Collection: s2anet
31 | Config: configs/s2anet/s2anet-le135_r50_fpn_amp-1x_dota.py
32 | Metadata:
33 | Training Data: DOTAv1.0
34 | Results:
35 | - Task: Oriented Object Detection
36 | Dataset: DOTAv1.0
37 | Metrics:
38 | mAP: 74.19
39 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/s2anet/s2anet_r50_fpn_fp16_1x_dota_le135/s2anet_r50_fpn_fp16_1x_dota_le135-5cac515c.pth
40 |
--------------------------------------------------------------------------------
/configs/s2anet/s2anet-le135_r50_fpn_amp-1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = './s2anet-le135_r50_fpn_1x_dota.py'
2 |
3 | optim_wrapper = dict(type='AmpOptimWrapper')
4 |
--------------------------------------------------------------------------------
/configs/sasm_reppoints/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: sasm
3 | Metadata:
4 | Training Data: DOTAv1.0
5 | Training Techniques:
6 | - SGD with Momentum
7 | - Weight Decay
8 | Training Resources: 1x GeForce GTX 1080 Ti
9 | Architecture:
10 | - ResNet
11 |   Paper:
12 |     URL: https://ojs.aaai.org/index.php/AAAI/article/view/20086
13 |     Title: 'Shape-Adaptive Selection and Measurement for Oriented Object Detection'
14 | README: configs/sasm_reppoints/README.md
15 |
16 | Models:
17 | - Name: sasm-reppoints-qbox_r50_fpn_1x_dota
18 | In Collection: sasm
19 | Config: configs/sasm_reppoints/sasm-reppoints-qbox_r50_fpn_1x_dota.py
20 | Metadata:
21 | Training Data: DOTAv1.0
22 | Results:
23 | - Task: Oriented Object Detection
24 | Dataset: DOTAv1.0
25 | Metrics:
26 | mAP: 66.45
27 | Weights: https://download.openmmlab.com/mmrotate/v0.1.0/sasm/sasm_reppoints_r50_fpn_1x_dota_oc/sasm_reppoints_r50_fpn_1x_dota_oc-6d9edded.pth
28 |
--------------------------------------------------------------------------------
/demo/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/demo/demo.jpg
--------------------------------------------------------------------------------
/demo/dota_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/demo/dota_demo.jpg
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.6.0"
2 | ARG CUDA="10.1"
3 | ARG CUDNN="7"
4 |
5 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
6 |
7 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
8 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
9 | ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
10 |
11 | # To fix GPG key error when running apt-get update
12 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
13 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
14 |
15 | RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
16 | && apt-get clean \
17 | && rm -rf /var/lib/apt/lists/*
18 |
19 | # Install openmim
20 | RUN pip install --no-cache-dir -U openmim
21 | # Install mmengine, mmcv, and mmdetection
22 | RUN mim install --no-cache-dir mmengine "mmcv>=2.0.0rc2" "mmdet>=3.0.0rc2"
23 | # Install MMRotate
24 | RUN conda clean --all -y
25 | RUN git clone https://github.com/open-mmlab/mmrotate.git -b 1.x /mmrotate
26 | WORKDIR /mmrotate
27 | ENV FORCE_CUDA="1"
28 | RUN pip install -r requirements/build.txt
29 | RUN pip install --no-cache-dir -e .
30 |
--------------------------------------------------------------------------------
/docker/serve/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.6.0"
2 | ARG CUDA="10.1"
3 | ARG CUDNN="7"
4 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
5 |
6 | ARG MMCV="1.4.5"
7 | ARG MMDET="2.22.0"
8 | ARG MMROTATE="0.3.0"
9 | ARG TORCHSERVE="0.2.0"
10 |
11 | ENV PYTHONUNBUFFERED TRUE
12 |
13 | RUN apt-get update && \
14 | DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
15 | ca-certificates \
16 | g++ \
17 | openjdk-11-jre-headless \
18 | # MMDet Requirements
19 | ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
20 | && rm -rf /var/lib/apt/lists/*
21 |
22 | ENV PATH="/opt/conda/bin:$PATH"
23 | ENV FORCE_CUDA=1
24 |
25 | # TORCHSEVER
26 | # torchserve>0.2.0 is compatible with pytorch>=1.8.1
27 | RUN pip install torchserve==${TORCHSERVE} torch-model-archiver
28 |
29 | # MMLAB
30 | ARG PYTORCH
31 | ARG CUDA
32 | RUN ["/bin/bash", "-c", "pip install mmcv-full==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html"]
33 | RUN pip install mmdet==${MMDET}
34 | RUN pip install mmrotate==${MMROTATE}
35 |
36 | RUN useradd -m model-server \
37 | && mkdir -p /home/model-server/tmp
38 |
39 | COPY entrypoint.sh /usr/local/bin/entrypoint.sh
40 |
41 | RUN chmod +x /usr/local/bin/entrypoint.sh \
42 | && chown -R model-server /home/model-server
43 |
44 | COPY config.properties /home/model-server/config.properties
45 | RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store
46 |
47 | EXPOSE 8080 8081 8082
48 |
49 | USER model-server
50 | WORKDIR /home/model-server
51 | ENV TEMP=/home/model-server/tmp
52 | ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
53 | CMD ["serve"]
54 |
--------------------------------------------------------------------------------
/docker/serve/config.properties:
--------------------------------------------------------------------------------
1 | inference_address=http://0.0.0.0:8080
2 | management_address=http://0.0.0.0:8081
3 | metrics_address=http://0.0.0.0:8082
4 | model_store=/home/model-server/model-store
5 | load_models=all
6 |
--------------------------------------------------------------------------------
/docker/serve/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [[ "$1" = "serve" ]]; then
5 | shift 1
6 | torchserve --start --ts-config /home/model-server/config.properties
7 | else
8 | eval "$@"
9 | fi
10 |
11 | # prevent docker exit
12 | tail -f /dev/null
13 |
--------------------------------------------------------------------------------
/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmrotate-logo.png");
3 | background-size: 160px 40px;
4 | height: 40px;
5 | width: 160px;
6 | }
7 |
--------------------------------------------------------------------------------
/docs/en/_static/image/mmrotate-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/_static/image/mmrotate-logo.png
--------------------------------------------------------------------------------
/docs/en/advanced_guides/conventions.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/conventions.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/customize_losses.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/customize_losses.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/customize_transforms.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/customize_transforms.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/data_flow.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/data_flow.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/datasets.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/datasets.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/engine.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/engine.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/evaluation.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/evaluation.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/how_to.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/how_to.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/index.rst:
--------------------------------------------------------------------------------
1 | Basic Concepts
2 | ***************
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | data_flow.md
8 | structures.md
9 | models.md
10 | datasets.md
11 | transforms.md
12 | evaluation.md
13 | engine.md
14 | conventions.md
15 |
16 | Component Customization
17 | ************************
18 |
19 | .. toctree::
20 | :maxdepth: 1
21 |
22 | customize_models.md
23 | customize_losses.md
24 | customize_dataset.md
25 | customize_transforms.md
26 | customize_runtime.md
27 |
28 | How to
29 | ************************
30 |
31 | .. toctree::
32 | :maxdepth: 1
33 |
34 | how_to.md
35 |
--------------------------------------------------------------------------------
/docs/en/advanced_guides/models.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/models.md
--------------------------------------------------------------------------------
/docs/en/advanced_guides/transforms.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/advanced_guides/transforms.md
--------------------------------------------------------------------------------
/docs/en/api.rst:
--------------------------------------------------------------------------------
1 | mmrotate.apis
2 | --------------
3 | .. automodule:: mmrotate.apis
4 | :members:
5 |
6 | mmrotate.core
7 | --------------
8 |
9 | anchor
10 | ^^^^^^^^^^
11 | .. automodule:: mmrotate.core.anchor
12 | :members:
13 |
14 | bbox
15 | ^^^^^^^^^^
16 | .. automodule:: mmrotate.core.bbox
17 | :members:
18 |
19 | patch
20 | ^^^^^^^^^^
21 | .. automodule:: mmrotate.core.patch
22 | :members:
23 |
24 | evaluation
25 | ^^^^^^^^^^
26 | .. automodule:: mmrotate.core.evaluation
27 | :members:
28 |
29 | post_processing
30 | ^^^^^^^^^^^^^^^
31 | .. automodule:: mmrotate.core.post_processing
32 | :members:
33 |
34 | visualization
35 | ^^^^^^^^^^^^^
36 | .. automodule:: mmrotate.core.visualization
37 | :members:
38 |
39 | mmrotate.datasets
40 | -----------------
41 |
42 | datasets
43 | ^^^^^^^^^^
44 | .. automodule:: mmrotate.datasets
45 | :members:
46 |
47 | pipelines
48 | ^^^^^^^^^^
49 | .. automodule:: mmrotate.datasets.pipelines
50 | :members:
51 |
52 | mmrotate.models
53 | ---------------
54 |
55 | detectors
56 | ^^^^^^^^^^
57 | .. automodule:: mmrotate.models.detectors
58 | :members:
59 |
60 | backbones
61 | ^^^^^^^^^^
62 | .. automodule:: mmrotate.models.backbones
63 | :members:
64 |
65 | necks
66 | ^^^^^^^^^^^^
67 | .. automodule:: mmrotate.models.necks
68 | :members:
69 |
70 | dense_heads
71 | ^^^^^^^^^^^^
72 | .. automodule:: mmrotate.models.dense_heads
73 | :members:
74 |
75 | roi_heads
76 | ^^^^^^^^^^
77 | .. automodule:: mmrotate.models.roi_heads
78 | :members:
79 |
80 | losses
81 | ^^^^^^^^^^
82 | .. automodule:: mmrotate.models.losses
83 | :members:
84 |
85 | utils
86 | ^^^^^^^^^^
87 | .. automodule:: mmrotate.models.utils
88 | :members:
89 |
90 | mmrotate.utils
91 | --------------
92 | .. automodule:: mmrotate.utils
93 | :members:
94 |
--------------------------------------------------------------------------------
/docs/en/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMRotate's documentation!
2 | =======================================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: Get Started
7 |
8 | overview.md
9 | get_started.md
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 | :caption: User Guides
14 |
15 | user_guides/index.rst
16 |
17 | .. toctree::
18 | :maxdepth: 2
19 | :caption: Advanced Guides
20 |
21 | advanced_guides/index.rst
22 |
23 | .. toctree::
24 | :maxdepth: 1
25 | :caption: Migration
26 |
27 | migration.md
28 |
29 | .. toctree::
30 | :maxdepth: 1
31 | :caption: API Reference
32 |
33 | api.rst
34 |
35 | .. toctree::
36 | :maxdepth: 1
37 | :caption: Model Zoo
38 |
39 | model_zoo.md
40 |
41 | .. toctree::
42 | :maxdepth: 1
43 | :caption: Notes
44 |
45 | notes/contribution_guide.md
46 | notes/projects.md
47 | notes/changelog.md
48 | notes/changelog_v0.x.md
49 | notes/faq.md
50 |
51 | .. toctree::
52 | :caption: Switch Language
53 |
54 | switch_language.md
55 |
56 |
57 |
58 | Indices and tables
59 | ==================
60 |
61 | * :ref:`genindex`
62 | * :ref:`search`
63 |
--------------------------------------------------------------------------------
/docs/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/en/migration.md:
--------------------------------------------------------------------------------
1 | # Migration
2 |
--------------------------------------------------------------------------------
/docs/en/notes/projects.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/notes/projects.md
--------------------------------------------------------------------------------
/docs/en/stat.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/stat.py
--------------------------------------------------------------------------------
/docs/en/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/docs/en/user_guides/dataset_prepare.md:
--------------------------------------------------------------------------------
1 | # Dataset Preparation (To be updated)
2 |
3 | Please refer to [data preparation](https://github.com/open-mmlab/mmrotate/tree/main/tools/data) for dataset preparation.
4 |
--------------------------------------------------------------------------------
/docs/en/user_guides/deploy.md:
--------------------------------------------------------------------------------
1 | # Model Deployment (To be updated)
2 |
3 | MMRotate 1.x fully relies on [MMDeploy](https://mmdeploy.readthedocs.io/) to deploy models.
4 | Please stay tuned and this document will be updated soon.
5 |
--------------------------------------------------------------------------------
/docs/en/user_guides/index.rst:
--------------------------------------------------------------------------------
1 | Train & Test
2 | **************
3 |
4 | MMRotate provides dozens of pretrained detection models in `Model Zoo `_,
5 | and supports multiple standard datasets, including DOTA, HRSC2016, SSDD, HRSID, etc. This note will show how to perform common tasks on these existing models and standard datasets:
6 |
7 |
8 | .. toctree::
9 | :maxdepth: 1
10 |
11 | config.md
12 | inference.md
13 | dataset_prepare.md
14 | train_test.md
15 | test_results_submission.md
16 |
17 |
18 | Useful Tools
19 | ************
20 |
21 | .. toctree::
22 | :maxdepth: 1
23 |
24 | useful_tools.md
25 | visualization.md
26 | deploy.md
27 | label_studio.md
28 |
--------------------------------------------------------------------------------
/docs/en/user_guides/inference.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/user_guides/inference.md
--------------------------------------------------------------------------------
/docs/en/user_guides/test_results_submission.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/user_guides/test_results_submission.md
--------------------------------------------------------------------------------
/docs/en/user_guides/visualization.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/en/user_guides/visualization.md
--------------------------------------------------------------------------------
/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmrotate-logo.png");
3 | background-size: 160px 40px;
4 | height: 40px;
5 | width: 160px;
6 | }
7 |
--------------------------------------------------------------------------------
/docs/zh_cn/_static/image/mmrotate-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/_static/image/mmrotate-logo.png
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/conventions.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/conventions.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/customize_losses.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/customize_losses.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/customize_transforms.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/customize_transforms.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/data_flow.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/data_flow.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/datasets.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/datasets.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/engine.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/engine.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/evaluation.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/evaluation.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/how_to.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/how_to.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/index.rst:
--------------------------------------------------------------------------------
1 | 基础概念
2 | ***************
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | data_flow.md
8 | structures.md
9 | models.md
10 | datasets.md
11 | transforms.md
12 | evaluation.md
13 | engine.md
14 | conventions.md
15 |
16 | 组件定制
17 | ************************
18 |
19 | .. toctree::
20 | :maxdepth: 1
21 |
22 | customize_models.md
23 | customize_losses.md
24 | customize_dataset.md
25 | customize_transforms.md
26 | customize_runtime.md
27 |
28 | How to
29 | ************************
30 |
31 | .. toctree::
32 | :maxdepth: 1
33 |
34 | how_to.md
35 |
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/models.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/models.md
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/transforms.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/advanced_guides/transforms.md
--------------------------------------------------------------------------------
/docs/zh_cn/api.rst:
--------------------------------------------------------------------------------
1 | mmrotate.apis
2 | --------------
3 | .. automodule:: mmrotate.apis
4 | :members:
5 |
6 | mmrotate.core
7 | --------------
8 |
9 | anchor
10 | ^^^^^^^^^^
11 | .. automodule:: mmrotate.core.anchor
12 | :members:
13 |
14 | bbox
15 | ^^^^^^^^^^
16 | .. automodule:: mmrotate.core.bbox
17 | :members:
18 |
19 | patch
20 | ^^^^^^^^^^
21 | .. automodule:: mmrotate.core.patch
22 | :members:
23 |
24 | evaluation
25 | ^^^^^^^^^^
26 | .. automodule:: mmrotate.core.evaluation
27 | :members:
28 |
29 | post_processing
30 | ^^^^^^^^^^^^^^^
31 | .. automodule:: mmrotate.core.post_processing
32 | :members:
33 |
34 | visualization
35 | ^^^^^^^^^^^^^
36 | .. automodule:: mmrotate.core.visualization
37 | :members:
38 |
39 | mmrotate.datasets
40 | -----------------
41 |
42 | datasets
43 | ^^^^^^^^^^
44 | .. automodule:: mmrotate.datasets
45 | :members:
46 |
47 | pipelines
48 | ^^^^^^^^^^
49 | .. automodule:: mmrotate.datasets.pipelines
50 | :members:
51 |
52 | mmrotate.models
53 | ---------------
54 |
55 | detectors
56 | ^^^^^^^^^^
57 | .. automodule:: mmrotate.models.detectors
58 | :members:
59 |
60 | backbones
61 | ^^^^^^^^^^
62 | .. automodule:: mmrotate.models.backbones
63 | :members:
64 |
65 | necks
66 | ^^^^^^^^^^^^
67 | .. automodule:: mmrotate.models.necks
68 | :members:
69 |
70 | dense_heads
71 | ^^^^^^^^^^^^
72 | .. automodule:: mmrotate.models.dense_heads
73 | :members:
74 |
75 | roi_heads
76 | ^^^^^^^^^^
77 | .. automodule:: mmrotate.models.roi_heads
78 | :members:
79 |
80 | losses
81 | ^^^^^^^^^^
82 | .. automodule:: mmrotate.models.losses
83 | :members:
84 |
85 | utils
86 | ^^^^^^^^^^
87 | .. automodule:: mmrotate.models.utils
88 | :members:
89 |
90 | mmrotate.utils
91 | --------------
92 | .. automodule:: mmrotate.utils
93 | :members:
94 |
--------------------------------------------------------------------------------
/docs/zh_cn/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMRotate's documentation!
2 | =======================================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: 开始你的第一步
7 |
8 | overview.md
9 | get_started.md
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 | :caption: 使用指南
14 |
15 | user_guides/index.rst
16 |
17 | .. toctree::
18 | :maxdepth: 2
19 | :caption: 进阶教程
20 |
21 | advanced_guides/index.rst
22 |
23 | .. toctree::
24 | :maxdepth: 1
25 | :caption: 迁移指南
26 |
27 | migration.md
28 |
29 | .. toctree::
30 | :maxdepth: 1
31 | :caption: 接口文档(英文)
32 |
33 | api.rst
34 |
35 | .. toctree::
36 | :maxdepth: 1
37 | :caption: 模型仓库
38 |
39 | model_zoo.md
40 |
41 | .. toctree::
42 | :maxdepth: 1
43 | :caption: 说明
44 |
45 | notes/contribution_guide.md
46 | notes/projects.md
47 | notes/changelog.md
48 | notes/changelog_v0.x.md
49 | notes/faq.md
50 |
51 | .. toctree::
52 | :caption: 语言切换
53 |
54 | switch_language.md
55 |
56 |
57 |
58 | Indices and tables
59 | ==================
60 |
61 | * :ref:`genindex`
62 | * :ref:`search`
63 |
--------------------------------------------------------------------------------
/docs/zh_cn/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/zh_cn/migration.md:
--------------------------------------------------------------------------------
1 | # Migration
2 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/contribution_guide.md:
--------------------------------------------------------------------------------
1 | # 贡献指南(待更新)
2 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/projects.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/notes/projects.md
--------------------------------------------------------------------------------
/docs/zh_cn/stat.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/stat.py
--------------------------------------------------------------------------------
/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/dataset_prepare.md:
--------------------------------------------------------------------------------
1 | # 准备数据集 (待更新)
2 |
3 | 具体的细节可以参考 [准备数据](https://github.com/open-mmlab/mmrotate/tree/main/tools/data) 下载并组织数据集。
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/deploy.md:
--------------------------------------------------------------------------------
1 | # 模型部署 (待更新)
2 |
3 | MMRotate 1.x 完全基于 [MMDeploy](https://mmdeploy.readthedocs.io/) 来部署模型。 我们将在下一个版本完善这个文档。
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/index.rst:
--------------------------------------------------------------------------------
1 | 训练 & 测试
2 | **************
3 |
4 | MMRotate 提供了几十个预训练模型 `Model Zoo <../model_zoo.md>`_,
5 | 并且支持了多个标准数据集, 包括 DOTA, HRSC2016, SSDD, HRSID 等。本说明将展示如何在这些现有模型和标准数据集上执行常见任务:
6 |
7 |
8 | .. toctree::
9 | :maxdepth: 1
10 |
11 | config.md
12 | inference.md
13 | dataset_prepare.md
14 | train_test.md
15 | test_results_submission.md
16 |
17 |
18 | 实用工具
19 | ************
20 |
21 | .. toctree::
22 | :maxdepth: 1
23 |
24 | useful_tools.md
25 | visualization.md
26 | deploy.md
27 | label_studio.md
28 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/inference.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/user_guides/inference.md
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/test_results_submission.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/user_guides/test_results_submission.md
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/visualization.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/docs/zh_cn/user_guides/visualization.md
--------------------------------------------------------------------------------
/mmrotate/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Import-time version compatibility checks for MMRotate's dependencies."""
import mmcv
import mmdet
import mmengine
from mmengine.utils import digit_version

from .version import __version__, short_version

# Supported MMCV range; both bounds are inclusive, matching the check below.
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)

assert (mmcv_version >= digit_version(mmcv_minimum_version)
        and mmcv_version <= digit_version(mmcv_maximum_version)), \
    f'MMCV {mmcv.__version__} is incompatible with MMRotate {__version__}. ' \
    f'Please use MMCV >= {mmcv_minimum_version}, ' \
    f'<= {mmcv_maximum_version} instead.'

# Supported MMEngine range; the upper bound is exclusive.
mmengine_minimum_version = '0.6.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)

assert (mmengine_version >= digit_version(mmengine_minimum_version)
        and mmengine_version < digit_version(mmengine_maximum_version)), \
    f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
    f'Please install mmengine>={mmengine_minimum_version}, ' \
    f'<{mmengine_maximum_version}.'

# Supported MMDetection range; both bounds are inclusive (the condition below
# uses <=), so the error message advertises '<=' as well.
mmdet_minimum_version = '3.0.0rc6'
mmdet_maximum_version = '3.3.0'
mmdet_version = digit_version(mmdet.__version__)

assert (mmdet_version >= digit_version(mmdet_minimum_version)
        and mmdet_version <= digit_version(mmdet_maximum_version)), \
    f'MMDetection {mmdet.__version__} is incompatible ' \
    f'with MMRotate {__version__}. ' \
    f'Please use MMDetection >= {mmdet_minimum_version}, ' \
    f'<= {mmdet_maximum_version} instead.'

__all__ = ['__version__', 'short_version', 'digit_version']
41 |
--------------------------------------------------------------------------------
/mmrotate/apis/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Public inference API re-exported by :mod:`mmrotate.apis`."""
from .inference import inference_detector_by_patches

__all__ = ['inference_detector_by_patches']
5 |
--------------------------------------------------------------------------------
/mmrotate/datasets/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Dataset classes and data transforms re-exported by
:mod:`mmrotate.datasets`."""
from .dior import DIORDataset  # noqa: F401, F403
from .dota import DOTAv2Dataset  # noqa: F401, F403
from .dota import DOTADataset, DOTAv15Dataset
from .hrsc import HRSCDataset  # noqa: F401, F403
from .transforms import *  # noqa: F401, F403
from .nwpu45 import NWPU45Dataset
from .instance_dataset import InstanceDataset

# Public dataset API of the package; transform names come via the star
# import above and are exported by the transforms subpackage itself.
__all__ = [
    'DOTADataset', 'DOTAv15Dataset', 'DOTAv2Dataset', 'HRSCDataset',
    'DIORDataset', 'NWPU45Dataset', 'InstanceDataset'
]
14 |
--------------------------------------------------------------------------------
/mmrotate/datasets/transforms/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Loading and geometric transforms re-exported by
:mod:`mmrotate.datasets.transforms`."""
from .loading import LoadPatchFromNDArray, LoadEmptyAnnotations
from .transforms import (ConvertBoxType, ConvertMask2BoxType,
                         RandomChoiceRotate, RandomRotate, Rotate)

__all__ = [
    'LoadPatchFromNDArray', 'Rotate', 'RandomRotate', 'RandomChoiceRotate',
    'ConvertBoxType', 'ConvertMask2BoxType', 'LoadEmptyAnnotations'
]
10 |
--------------------------------------------------------------------------------
/mmrotate/engine/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .hooks import *  # noqa: F401,F403
--------------------------------------------------------------------------------
/mmrotate/engine/hooks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Hook implementations re-exported by :mod:`mmrotate.engine.hooks`."""
from .mean_teacher_hook import MeanTeacherHook

__all__ = ['MeanTeacherHook']
--------------------------------------------------------------------------------
/mmrotate/evaluation/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Evaluation functions and metrics re-exported by
:mod:`mmrotate.evaluation`."""
from .functional import * # noqa: F401,F403
from .metrics import * # noqa: F401,F403
4 |
--------------------------------------------------------------------------------
/mmrotate/evaluation/functional/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Functional evaluation utilities re-exported by
:mod:`mmrotate.evaluation.functional`."""
from .mean_ap import eval_rbbox_map

__all__ = ['eval_rbbox_map']
5 |
--------------------------------------------------------------------------------
/mmrotate/evaluation/metrics/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Metric classes re-exported by :mod:`mmrotate.evaluation.metrics`."""
from .dota_metric import DOTAMetric
from .rotated_coco_metric import RotatedCocoMetric

__all__ = ['DOTAMetric', 'RotatedCocoMetric']
6 |
--------------------------------------------------------------------------------
/mmrotate/models/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Aggregates all model subpackages; importing this module registers every
model component with the registries via the star imports below."""
from .backbones import * # noqa: F401, F403
from .dense_heads import * # noqa: F401, F403
from .detectors import * # noqa: F401, F403
from .layers import * # noqa: F401, F403
from .losses import * # noqa: F401, F403
from .necks import * # noqa: F401, F403
from .roi_heads import * # noqa: F401, F403
from .task_modules import * # noqa: F401,F403
from .utils import * # noqa: F401, F403
11 |
--------------------------------------------------------------------------------
/mmrotate/models/backbones/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Backbone networks re-exported by :mod:`mmrotate.models.backbones`."""
from .re_resnet import ReResNet

__all__ = ['ReResNet']
5 |
--------------------------------------------------------------------------------
/mmrotate/models/dense_heads/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Dense (single-stage / DETR-style) heads re-exported by
:mod:`mmrotate.models.dense_heads`."""
from .angle_branch_retina_head import AngleBranchRetinaHead
from .cfa_head import CFAHead
from .h2rbox_head import H2RBoxHead
from .h2rbox_v2_head import H2RBoxV2Head
from .oriented_reppoints_head import OrientedRepPointsHead
from .oriented_rpn_head import OrientedRPNHead
from .r3_head import R3Head, R3RefineHead
from .rotated_atss_head import RotatedATSSHead
from .rotated_fcos_head import RotatedFCOSHead
from .rotated_reppoints_head import RotatedRepPointsHead
from .rotated_retina_head import RotatedRetinaHead
from .rotated_rtmdet_head import RotatedRTMDetHead, RotatedRTMDetSepBNHead
from .s2a_head import S2AHead, S2ARefineHead
from .sam_reppoints_head import SAMRepPointsHead
# DETR-family rotated heads (RHINO and rotated DETR variants).
from .rhino_align_head import RHINOAlignHead
from .rhino_head import RHINOHead
from .rhino_ph_head import RHINOPositiveHungarianHead
from .rhino_phc_head import RHINOPositiveHungarianClassificationHead
from .rotated_conditional_detr_head import RotatedConditionalDETRHead
from .rotated_dab_detr_head import RotatedDABDETRHead
from .rotated_deformable_detr_head import RotatedDeformableDETRHead
from .rotated_detr_head import RotatedDETRHead


__all__ = [
    'RotatedRetinaHead', 'OrientedRPNHead', 'RotatedRepPointsHead',
    'SAMRepPointsHead', 'AngleBranchRetinaHead', 'RotatedATSSHead',
    'RotatedFCOSHead', 'OrientedRepPointsHead', 'R3Head', 'R3RefineHead',
    'S2AHead', 'S2ARefineHead', 'CFAHead', 'H2RBoxHead', 'H2RBoxV2Head',
    'RotatedRTMDetHead', 'RotatedRTMDetSepBNHead', 'RotatedDETRHead',
    'RotatedDeformableDETRHead', 'RotatedConditionalDETRHead',
    'RotatedDABDETRHead', 'RHINOHead', 'RHINOPositiveHungarianHead',
    'RHINOPositiveHungarianClassificationHead', 'RHINOAlignHead'
]
36 |
--------------------------------------------------------------------------------
/mmrotate/models/dense_heads/rotated_conditional_detr_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) SI Analytics. All rights reserved.
2 | # Licensed under the CC BY-NC 4.0 License. See LICENSE file in the project root for full license information.
3 | #
4 | # Copyright (c) OpenMMLab. All rights reserved.
5 | # Licensed under the Apache License, Version 2.0. See LICENSE file in the mmrotate repository for full license information.
6 | from mmdet.models.dense_heads import ConditionalDETRHead
7 |
8 | from mmrotate.registry import MODELS
9 | from .rotated_detr_head import RotatedDETRHead
10 |
11 |
@MODELS.register_module()
class RotatedConditionalDETRHead(ConditionalDETRHead, RotatedDETRHead):
    """Conditional DETR head adapted to rotated boxes.

    The body is intentionally empty: the method resolution order supplies
    ``init_weights``, ``forward``, ``loss``, ``loss_and_predict`` and
    ``predict`` from :class:`ConditionalDETRHead`, while every other method
    is taken from :class:`RotatedDETRHead`.
    """
27 |
--------------------------------------------------------------------------------
/mmrotate/models/dense_heads/rotated_dab_detr_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) SI Analytics. All rights reserved.
2 | # Licensed under the CC BY-NC 4.0 License. See LICENSE file in the project root for full license information.
3 | #
4 | # Copyright (c) OpenMMLab. All rights reserved.
5 | # Licensed under the Apache License, Version 2.0. See LICENSE file in the mmrotate repository for full license information.
6 | from mmcv.cnn import Linear
7 | from mmdet.models.dense_heads import DABDETRHead
8 | from mmdet.models.layers import MLP
9 |
10 | from mmrotate.registry import MODELS
11 | from .rotated_conditional_detr_head import RotatedConditionalDETRHead
12 |
13 |
@MODELS.register_module()
class RotatedDABDETRHead(DABDETRHead, RotatedConditionalDETRHead):
    """DAB-DETR head adapted to rotated boxes.

    Method resolution: ``init_weights``, ``forward`` and ``predict`` are
    provided by :class:`DABDETRHead`; every other method comes from
    :class:`RotatedConditionalDETRHead`.
    """

    def _init_layers(self) -> None:
        """Build the classification and regression branches."""
        dims = self.embed_dims
        # Classification branch: a single linear projection to class logits.
        self.fc_cls = Linear(dims, self.cls_out_channels)
        # Regression branch: 3-layer MLP predicting ``reg_dim`` box values.
        self.fc_reg = MLP(dims, dims, self.reg_dim, 3)
33 |
--------------------------------------------------------------------------------
/mmrotate/models/detectors/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Detector classes re-exported by :mod:`mmrotate.models.detectors`."""
from .h2rbox import H2RBoxDetector
from .h2rbox_v2 import H2RBoxV2Detector
from .refine_single_stage import RefineSingleStageDetector
from .semi_base import RotatedSemiBaseDetector
from .rotated_soft_teacher import RotatedSoftTeacher
from .rhino import RHINO
from .rotated_dab_detr import RotatedDABDETR
from .rotated_deformable_detr import RotatedDeformableDETR

__all__ = ['RefineSingleStageDetector', 'H2RBoxDetector', 'H2RBoxV2Detector',
           'RotatedSemiBaseDetector', 'RotatedSoftTeacher',
           'RotatedDABDETR', 'RotatedDeformableDETR', 'RHINO']
14 |
--------------------------------------------------------------------------------
/mmrotate/models/detectors/rotated_dab_detr.py:
--------------------------------------------------------------------------------
1 | # Ref: https://github.com/SIAnalytics/RHINO
2 |
3 | # Copyright (c) SI Analytics. All rights reserved.
4 | # Licensed under the CC BY-NC 4.0 License. See LICENSE file in the project root for full license information.
5 | #
6 | # Copyright (c) OpenMMLab. All rights reserved.
7 | # Licensed under the Apache License, Version 2.0. See LICENSE file in the mmrotate repository for full license information.
8 | from mmdet.models.detectors.dab_detr import DABDETR
9 | from mmdet.models.layers import SinePositionalEncoding
10 | from mmdet.models.layers.transformer import DABDetrTransformerEncoder
11 | from torch import nn
12 |
13 | from mmrotate.registry import MODELS
14 | from ..layers import RotatedDABDetrTransformerDecoder
15 |
16 |
@MODELS.register_module()
class RotatedDABDETR(DABDETR):
    r"""Rotated (angle-refine) version of DAB-DETR.

    Identical to :class:`mmdet.models.detectors.dab_detr.DABDETR` except
    that the decoder is :class:`RotatedDABDetrTransformerDecoder`, which
    also refines box angles. Code is modified from mmdet.
    """

    def _init_layers(self) -> None:
        """Initialize decoder as RotatedDABDetrTransformerDecoder for angle
        refine."""
        # At this point the attributes hold config dicts (mmdet convention);
        # each is replaced in-place by the built module. Order matters:
        # ``embed_dims`` below is read from the built encoder.
        self.positional_encoding = SinePositionalEncoding(
            **self.positional_encoding)
        self.encoder = DABDetrTransformerEncoder(**self.encoder)
        self.decoder = RotatedDABDetrTransformerDecoder(**self.decoder)
        self.embed_dims = self.encoder.embed_dims
        self.query_dim = self.decoder.query_dim
        # One learnable ``query_dim``-dimensional anchor per query.
        self.query_embedding = nn.Embedding(self.num_queries, self.query_dim)
        if self.num_patterns > 0:
            self.patterns = nn.Embedding(self.num_patterns, self.embed_dims)

        # The sine encoding concatenates x- and y-parts, producing
        # ``2 * num_feats`` channels, which must equal ``embed_dims``.
        num_feats = self.positional_encoding.num_feats
        assert num_feats * 2 == self.embed_dims, \
            f'embed_dims should be exactly 2 times of num_feats. ' \
            f'Found {self.embed_dims} and {num_feats}.'
41 |
--------------------------------------------------------------------------------
/mmrotate/models/layers/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Alignment modules and transformer layers re-exported by
:mod:`mmrotate.models.layers`."""
from .align import FRM, AlignConv, DCNAlignModule, PseudoAlignModule
from .transformer import (RhinoTransformerDecoder, RhinoTransformerDecoderV2,
                          RhinoTransformerDecoderV4, RotatedCdnQueryGenerator,
                          RotatedDABDetrTransformerDecoder,
                          RotatedDeformableDetrTransformerDecoder,
                          coordinate_to_encoding, RotatedMultiScaleDeformableAttention)

__all__ = ['FRM', 'AlignConv', 'DCNAlignModule', 'PseudoAlignModule',
           'coordinate_to_encoding', 'RotatedDABDetrTransformerDecoder',
           'RotatedDeformableDetrTransformerDecoder', 'RhinoTransformerDecoder',
           'RotatedCdnQueryGenerator', 'RhinoTransformerDecoderV2',
           'RhinoTransformerDecoderV4', 'RotatedMultiScaleDeformableAttention']
14 |
--------------------------------------------------------------------------------
/mmrotate/models/layers/transformer/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) SI Analytics. All rights reserved.
# Licensed under the CC BY-NC 4.0 License. See LICENSE file in the project root for full license information.
#
# Copyright (c) OpenMMLab. All rights reserved.
# Licensed under the Apache License, Version 2.0. See LICENSE file in the mmrotate repository for full license information.
"""Rotated DETR-family transformer layers re-exported by
:mod:`mmrotate.models.layers.transformer`."""
from .rhino_layers import RhinoTransformerDecoder, RotatedCdnQueryGenerator
from .rhino_layers_v2 import (RhinoTransformerDecoderV2,
                              RhinoTransformerDecoderV4)
from .rotated_dab_detr_layers import RotatedDABDetrTransformerDecoder
from .rotated_deformable_detr_layers import \
    RotatedDeformableDetrTransformerDecoder
from .utils import coordinate_to_encoding
from .rotated_attention import RotatedMultiScaleDeformableAttention

__all__ = [
    'coordinate_to_encoding',
    'RotatedDABDetrTransformerDecoder',
    'RotatedDeformableDetrTransformerDecoder',
    'RhinoTransformerDecoder',
    'RotatedCdnQueryGenerator',
    'RhinoTransformerDecoderV2',
    'RhinoTransformerDecoderV4',
    'RotatedMultiScaleDeformableAttention'
]
25 |
--------------------------------------------------------------------------------
/mmrotate/models/losses/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Loss functions re-exported by :mod:`mmrotate.models.losses`."""
from .convex_giou_loss import BCConvexGIoULoss, ConvexGIoULoss
from .gaussian_dist_loss import GDLoss
from .gaussian_dist_loss_v1 import GDLoss_v1
from .h2rbox_consistency_loss import H2RBoxConsistencyLoss
from .h2rbox_v2_consistency_loss import H2RBoxV2ConsistencyLoss
from .kf_iou_loss import KFLoss
from .rotated_iou_loss import RotatedIoULoss
from .smooth_focal_loss import SmoothFocalLoss
from .spatial_border_loss import SpatialBorderLoss

__all__ = [
    'GDLoss', 'GDLoss_v1', 'KFLoss', 'ConvexGIoULoss', 'BCConvexGIoULoss',
    'SmoothFocalLoss', 'RotatedIoULoss', 'SpatialBorderLoss',
    'H2RBoxConsistencyLoss', 'H2RBoxV2ConsistencyLoss'
]
17 |
--------------------------------------------------------------------------------
/mmrotate/models/necks/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Neck modules re-exported by :mod:`mmrotate.models.necks`."""
from .re_fpn import ReFPN

__all__ = ['ReFPN']
5 |
--------------------------------------------------------------------------------
/mmrotate/models/roi_heads/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""RoI heads and extractors re-exported by
:mod:`mmrotate.models.roi_heads`."""
from .bbox_heads import RotatedShared2FCBBoxHead
from .gv_ratio_roi_head import GVRatioRoIHead
from .roi_extractors import RotatedSingleRoIExtractor

__all__ = [
    'RotatedShared2FCBBoxHead', 'RotatedSingleRoIExtractor', 'GVRatioRoIHead'
]
9 |
--------------------------------------------------------------------------------
/mmrotate/models/roi_heads/bbox_heads/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Box heads re-exported by :mod:`mmrotate.models.roi_heads.bbox_heads`."""
from .convfc_rbbox_head import RotatedShared2FCBBoxHead
from .gv_bbox_head import GVBBoxHead

__all__ = ['RotatedShared2FCBBoxHead', 'GVBBoxHead']
6 |
--------------------------------------------------------------------------------
/mmrotate/models/roi_heads/roi_extractors/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""RoI feature extractors re-exported by
:mod:`mmrotate.models.roi_heads.roi_extractors`."""
from .rotate_single_level_roi_extractor import RotatedSingleRoIExtractor

__all__ = ['RotatedSingleRoIExtractor']
5 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Aggregates assigners, box coders and prior generators; the star imports
register them with the registries at import time."""
from .assigners import * # noqa: F401,F403
from .coders import * # noqa: F401,F403
from .prior_generators import * # noqa: F401,F403
5 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/assigners/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Assigners, IoU calculators and match costs re-exported by
:mod:`mmrotate.models.task_modules.assigners`."""
from .convex_assigner import ConvexAssigner
from .max_convex_iou_assigner import MaxConvexIoUAssigner
from .rotate_iou2d_calculator import (FakeRBboxOverlaps2D,
                                      QBbox2HBboxOverlaps2D,
                                      RBbox2HBboxOverlaps2D, RBboxOverlaps2D)
from .rotated_atss_assigner import RotatedATSSAssigner
from .sas_assigner import SASAssigner
from .dn_group_hungarian_assigner import DNGroupHungarianAssigner
from .match_cost import CenterL1Cost, GDCost, RBoxL1Cost, RotatedIoUCost

__all__ = [
    'ConvexAssigner', 'MaxConvexIoUAssigner', 'SASAssigner',
    'RotatedATSSAssigner', 'RBboxOverlaps2D', 'FakeRBboxOverlaps2D',
    'RBbox2HBboxOverlaps2D', 'QBbox2HBboxOverlaps2D', 'RBoxL1Cost', 'GDCost',
    'RotatedIoUCost', 'CenterL1Cost', 'DNGroupHungarianAssigner'
]
18 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/coders/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Angle and box coders re-exported by
:mod:`mmrotate.models.task_modules.coders`."""
from .angle_coder import CSLCoder, PSCCoder, PseudoAngleCoder
from .delta_midpointoffset_rbbox_coder import MidpointOffsetCoder
from .delta_xywh_hbbox_coder import DeltaXYWHHBBoxCoder
from .delta_xywh_qbbox_coder import DeltaXYWHQBBoxCoder
from .delta_xywht_hbbox_coder import DeltaXYWHTHBBoxCoder
from .delta_xywht_rbbox_coder import DeltaXYWHTRBBoxCoder
from .distance_angle_point_coder import DistanceAnglePointCoder
from .gliding_vertex_coder import GVFixCoder, GVRatioCoder

__all__ = [
    'DeltaXYWHTRBBoxCoder', 'DeltaXYWHTHBBoxCoder', 'MidpointOffsetCoder',
    'GVFixCoder', 'GVRatioCoder', 'CSLCoder', 'PSCCoder',
    'DistanceAnglePointCoder', 'DeltaXYWHHBBoxCoder', 'DeltaXYWHQBBoxCoder',
    'PseudoAngleCoder'
]
17 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/coders/delta_xywh_hbbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from typing import Union
3 |
4 | from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
5 | from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import bbox2delta
6 | from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
7 | from torch import Tensor
8 |
9 | from mmrotate.registry import TASK_UTILS
10 | from mmrotate.structures.bbox import RotatedBoxes
11 |
12 |
@TASK_UTILS.register_module()
class DeltaXYWHHBBoxCoder(DeltaXYWHBBoxCoder):
    """Delta XYWH HBBox coder.

    Behaves like :class:`DeltaXYWHBBoxCoder`, except that ``encode`` accepts
    rotated ground-truth boxes (:obj:`RotatedBoxes`), which are reduced to
    their horizontal bounding boxes before the deltas are computed.
    """

    def encode(self, bboxes: Union[HorizontalBoxes, Tensor],
               gt_bboxes: Union[RotatedBoxes, Tensor]) -> Tensor:
        """Compute regression deltas mapping ``bboxes`` onto ``gt_bboxes``.

        Args:
            bboxes (:obj:`HorizontalBoxes` or Tensor): Source boxes, e.g.,
                object proposals.
            gt_bboxes (:obj:`RotatedBoxes` or Tensor): Target of the
                transformation, e.g., ground-truth boxes.
        Returns:
            Tensor: Box transformation deltas
        """
        # Proposals are 4-d horizontal boxes, targets are 5-d rotated boxes.
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == 4
        assert gt_bboxes.size(-1) == 5

        proposals = get_box_tensor(bboxes)

        # Wrap raw tensors so they can be converted to horizontal boxes.
        rboxes = gt_bboxes if isinstance(gt_bboxes, RotatedBoxes) \
            else RotatedBoxes(gt_bboxes)
        hbox_targets = rboxes.convert_to('hbox').tensor

        return bbox2delta(proposals, hbox_targets, self.means, self.stds)
47 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/coders/delta_xywh_qbbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from typing import Union
3 |
4 | from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
5 | from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import bbox2delta
6 | from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
7 | from torch import Tensor
8 |
9 | from mmrotate.registry import TASK_UTILS
10 | from mmrotate.structures.bbox import QuadriBoxes
11 |
12 |
@TASK_UTILS.register_module()
class DeltaXYWHQBBoxCoder(DeltaXYWHBBoxCoder):
    """Delta XYWH QBBox coder.

    Identical to :obj:`DeltaXYWHBBoxCoder` except that ``encode`` accepts
    quadrilateral ground-truth boxes (:obj:`QuadriBoxes`), which are first
    projected to their horizontal bounding boxes.
    """

    def encode(self, bboxes: Union[HorizontalBoxes, Tensor],
               gt_bboxes: Union[QuadriBoxes, Tensor]) -> Tensor:
        """Compute regression deltas mapping ``bboxes`` onto ``gt_bboxes``.

        Args:
            bboxes (:obj:`HorizontalBoxes` or Tensor): Source boxes, e.g.,
                object proposals.
            gt_bboxes (:obj:`QuadriBoxes` or Tensor): Target of the
                transformation, e.g., ground-truth boxes.

        Returns:
            Tensor: Box transformation deltas.
        """
        assert bboxes.size(-1) == 4
        assert gt_bboxes.size(-1) == 8
        assert bboxes.size(0) == gt_bboxes.size(0)

        proposals = get_box_tensor(bboxes)

        # Project the quadrilateral targets to horizontal boxes first.
        qboxes = gt_bboxes if isinstance(gt_bboxes, QuadriBoxes) \
            else QuadriBoxes(gt_bboxes)
        targets = qboxes.convert_to('hbox').tensor

        return bbox2delta(proposals, targets, self.means, self.stds)
47 |
--------------------------------------------------------------------------------
/mmrotate/models/task_modules/prior_generators/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .anchor_generator import (FakeRotatedAnchorGenerator,
3 | PseudoRotatedAnchorGenerator)
4 |
# Public API of ``mmrotate.models.task_modules.prior_generators``.
__all__ = ['PseudoRotatedAnchorGenerator', 'FakeRotatedAnchorGenerator']
6 |
--------------------------------------------------------------------------------
/mmrotate/models/utils/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (_filter_rpn_results_by_score, convex_overlaps,
                   filter_gt_instances, get_num_level_anchors_inside,
                   levels_to_images, points_center_pts)
from .orconv import ORConv2d
from .ripool import RotationInvariantPooling

# Public API of ``mmrotate.models.utils``.
__all__ = [
    'ORConv2d', 'RotationInvariantPooling', 'get_num_level_anchors_inside',
    'points_center_pts', 'levels_to_images', 'convex_overlaps',
    'filter_gt_instances', '_filter_rpn_results_by_score'
]
13 |
--------------------------------------------------------------------------------
/mmrotate/models/utils/ripool.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from torch import nn
3 |
4 |
class RotationInvariantPooling(nn.Module):
    """Rotation-invariant pooling module.

    Max-pools over the ``nOrientation`` oriented channels of each feature
    group, so the output no longer depends on which orientation responded.

    Args:
        nInputPlane (int): The number of input planes.
        nOrientation (int, optional): The number of oriented channels per
            group. Defaults to 8.
    """

    def __init__(self, nInputPlane, nOrientation=8):
        super().__init__()
        self.nInputPlane = nInputPlane
        self.nOrientation = nOrientation

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Input of shape (N, C * nOrientation, H, W),
                with the orientation channels of each group contiguous.

        Returns:
            torch.Tensor: Output of shape (N, C, H, W).
        """
        n, _, h, w = x.size()
        # Split channels into (groups, orientations) and reduce over the
        # orientation axis.
        x = x.view(n, -1, self.nOrientation, h, w)
        x, _ = x.max(dim=2, keepdim=False)
        return x
24 |
--------------------------------------------------------------------------------
/mmrotate/structures/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .bbox import * # noqa: F401, F403
3 |
--------------------------------------------------------------------------------
/mmrotate/structures/bbox/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import fake_rbbox_overlaps, rbbox_overlaps
from .box_converters import (hbox2qbox, hbox2rbox, qbox2hbox, qbox2rbox,
                             rbox2hbox, rbox2qbox)
from .quadri_boxes import QuadriBoxes
from .rotated_boxes import RotatedBoxes
from .transforms import (distance2obb, gaussian2bbox, gt2gaussian, norm_angle,
                         rbox_project)

# Public API of ``mmrotate.structures.bbox``: box types, converters between
# horizontal/rotated/quadrilateral boxes, and overlap/transform utilities.
__all__ = [
    'QuadriBoxes', 'RotatedBoxes', 'hbox2rbox', 'hbox2qbox', 'rbox2hbox',
    'rbox2qbox', 'qbox2hbox', 'qbox2rbox', 'gaussian2bbox', 'gt2gaussian',
    'norm_angle', 'rbbox_overlaps', 'fake_rbbox_overlaps', 'distance2obb',
    'rbox_project'
]
--------------------------------------------------------------------------------
/mmrotate/testing/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from ._utils import demo_mm_inputs, demo_mm_proposals, get_detector_cfg

# Helpers for unit tests: detector config loading and fake inputs/proposals.
__all__ = ['get_detector_cfg', 'demo_mm_inputs', 'demo_mm_proposals']
5 |
--------------------------------------------------------------------------------
/mmrotate/utils/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .misc import get_test_pipeline_cfg
from .patch import get_multiscale_patch, merge_results_by_nms, slide_window
from .setup_env import register_all_modules

# Public API of ``mmrotate.utils``.
__all__ = [
    'collect_env', 'register_all_modules', 'get_test_pipeline_cfg',
    'get_multiscale_patch', 'merge_results_by_nms', 'slide_window'
]
11 |
--------------------------------------------------------------------------------
/mmrotate/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.utils import get_git_hash
3 | from mmengine.utils.dl_utils import collect_env as collect_base_env
4 |
5 | import mmrotate
6 |
7 |
def collect_env():
    """Collect environment information, including the mmrotate version."""
    env_info = collect_base_env()
    # Append the installed mmrotate version plus the short git hash.
    env_info['MMRotate'] = f'{mmrotate.__version__}+{get_git_hash(digits=7)}'
    return env_info
14 |
15 |
if __name__ == '__main__':
    # Print one ``name: value`` line per collected environment entry.
    for key, value in collect_env().items():
        print(f'{key}: {value}')
19 |
--------------------------------------------------------------------------------
/mmrotate/utils/misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from typing import Union
3 |
4 | from mmengine.config import Config, ConfigDict
5 |
6 |
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
    """Get the test dataset pipeline from an entire config.

    Args:
        cfg (str or :obj:`ConfigDict`): The entire config. Can be a config
            file or a ``ConfigDict``.

    Returns:
        :obj:`ConfigDict`: The config of the test dataset.
    """
    if isinstance(cfg, str):
        cfg = Config.fromfile(cfg)

    dataset_cfg = cfg.test_dataloader.dataset
    # Unwrap dataset wrappers (single-dataset wrappers via ``dataset``,
    # ConcatDataset-style wrappers via ``datasets``) until a pipeline shows.
    while True:
        if 'pipeline' in dataset_cfg:
            return dataset_cfg.pipeline
        if 'dataset' in dataset_cfg:
            dataset_cfg = dataset_cfg.dataset
        elif 'datasets' in dataset_cfg:
            dataset_cfg = dataset_cfg.datasets[0]
        else:
            raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
33 |
--------------------------------------------------------------------------------
/mmrotate/utils/patch/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .merge_results import merge_results_by_nms
from .split import get_multiscale_patch, slide_window

# Utilities for sliding-window (patch-based) inference on large images.
__all__ = ['merge_results_by_nms', 'get_multiscale_patch', 'slide_window']
6 |
--------------------------------------------------------------------------------
/mmrotate/utils/setup_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import datetime
3 | import warnings
4 |
5 | from mmengine import DefaultScope
6 |
7 |
def register_all_modules(init_default_scope: bool = True) -> None:
    """Register all modules in mmrotate into the registries.

    Args:
        init_default_scope (bool): Whether to initialize the mmrotate default
            scope. When `init_default_scope=True`, the global default scope
            will be set to `mmrotate`, and all registries will build modules
            from mmrotate's registry node. To understand more about the
            registry, please refer to
            https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the sub-packages triggers their registry registrations.
    import mmrotate.datasets  # noqa: F401,F403
    import mmrotate.evaluation  # noqa: F401,F403
    import mmrotate.models  # noqa: F401,F403
    import mmrotate.visualization  # noqa: F401,F403

    if init_default_scope:
        never_created = DefaultScope.get_current_instance() is None \
            or not DefaultScope.check_instance_created('mmrotate')
        if never_created:
            DefaultScope.get_instance('mmrotate', scope_name='mmrotate')
            return
        current_scope = DefaultScope.get_current_instance()
        if current_scope.scope_name != 'mmrotate':
            # Fix: the original message concatenated to "the currentdefault
            # scope" because of a missing trailing space.
            warnings.warn('The current default scope '
                          f'"{current_scope.scope_name}" is not "mmrotate", '
                          '`register_all_modules` will force the current '
                          'default scope to be "mmrotate". If this is not '
                          'expected, please set `init_default_scope=False`.')
            # avoid name conflict
            new_instance_name = f'mmrotate-{datetime.datetime.now()}'
            DefaultScope.get_instance(new_instance_name, scope_name='mmrotate')
40 |
--------------------------------------------------------------------------------
/mmrotate/version.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.

# Full version string of the mmrotate package (PEP 440 pre-release).
__version__ = '1.0.0rc1'
# Kept for backward compatibility; identical to ``__version__``.
short_version = __version__
5 |
--------------------------------------------------------------------------------
/mmrotate/visualization/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import RotLocalVisualizer
from .palette import get_palette

# Visualization utilities for rotated-box detection results.
__all__ = ['get_palette', 'RotLocalVisualizer']
6 |
--------------------------------------------------------------------------------
/mmrotate/visualization/palette.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from typing import List, Tuple, Union
3 |
4 | import mmcv
5 | import numpy as np
6 | from mmengine.utils import is_str
7 |
8 |
def get_palette(palette: Union[List[tuple], str, tuple],
                num_classes: int) -> List[Tuple[int]]:
    """Resolve a palette specification into a list of per-class colors.

    Args:
        palette (list[tuple] | str | tuple): Palette inputs.
        num_classes (int): The number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    if isinstance(palette, list):
        dataset_palette = palette
    elif isinstance(palette, tuple):
        # A single color repeated for every class.
        dataset_palette = [palette] * num_classes
    elif palette == 'random' or palette is None:
        # Draw reproducible random colors without disturbing the global
        # NumPy RNG state.
        state = np.random.get_state()
        np.random.seed(42)
        colors = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        dataset_palette = [tuple(color) for color in colors]
    elif palette == 'dota':
        from mmrotate.datasets import DOTADataset
        dataset_palette = DOTADataset.METAINFO['palette']
    elif palette == 'sar':
        from mmrotate.datasets import SARDataset
        dataset_palette = SARDataset.METAINFO['palette']
    elif palette == 'hrsc':
        from mmrotate.datasets import HRSCDataset
        dataset_palette = HRSCDataset.METAINFO['palette']
    elif is_str(palette):
        # Any other string is a single mmcv color name; mmcv returns BGR,
        # so reverse it to RGB and repeat for every class.
        dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')

    assert len(dataset_palette) >= num_classes, \
        'The length of palette should not be less than `num_classes`.'
    return dataset_palette
49 |
--------------------------------------------------------------------------------
/model-index.yml:
--------------------------------------------------------------------------------
1 | Import:
2 | - configs/cfa/metafile.yml
3 | - configs/convnext/metafile.yml
4 | - configs/csl/metafile.yml
5 | - configs/gliding_vertex/metafile.yml
6 | - configs/gwd/metafile.yml
7 | - configs/h2rbox/metafile.yml
8 | - configs/kfiou/metafile.yml
9 | - configs/kld/metafile.yml
10 | - configs/oriented_rcnn/metafile.yml
11 | - configs/oriented_reppoints/metafile.yml
12 | - configs/psc/metafile.yml
13 | - configs/r3det/metafile.yml
14 | - configs/redet/metafile.yml
15 | - configs/roi_trans/metafile.yml
16 | - configs/rotated_atss/metafile.yml
17 | - configs/rotated_faster_rcnn/metafile.yml
18 | - configs/rotated_fcos/metafile.yml
19 | - configs/rotated_reppoints/metafile.yml
20 | - configs/rotated_retinanet/metafile.yml
21 | - configs/rotated_rtmdet/metafile.yml
22 | - configs/s2anet/metafile.yml
23 | - configs/sasm_reppoints/metafile.yml
24 |
--------------------------------------------------------------------------------
/projects/CastDetv2/castdet/__init__.py:
--------------------------------------------------------------------------------
# Modules that make up the CastDet (v2) project; importing this package
# registers them with the mmrotate/mmdet registries.
from .castdet import RotatedCastDet
from .ovd_bbox_head import Shared2FCBBoxHeadZSD, Projection2
from .pseudo_label_queue import PseudoQueue
from .modified_resnet import ModifiedResNet2
from .standard_roi_head2 import StandardRoIHead2

__all__ = [
    'RotatedCastDet', 'Shared2FCBBoxHeadZSD', 'Projection2', 'PseudoQueue',
    'ModifiedResNet2', 'StandardRoIHead2'
]
--------------------------------------------------------------------------------
/projects/CastDetv2/configs/oriented-rcnn_r50-fpn_20k_visdronezsd_base-set.py:
--------------------------------------------------------------------------------
_base_ = [
    'mmrotate::_base_/models/oriented-rcnn-le90_r50_fpn.py',
    'mmrotate::_base_/default_runtime.py',
    'mmrotate::_base_/datasets/visdronezsd.py'
]

batch_size = 8
num_workers = 2
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=num_workers,
)

# 16 base (seen) classes for the zero-shot detection base-set training.
model = dict(roi_head=dict(bbox_head=dict(num_classes=16)))

# training schedule for 20k iterations
train_cfg = dict(
    type='IterBasedTrainLoop', max_iters=20000, val_interval=4000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# learning rate policy: 500-iter linear warmup, step decay at 16k/18k iters
param_scheduler = [
    dict(
        type='LinearLR', start_factor=1.0 / 3, by_epoch=False, begin=0,
        end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=20000,
        by_epoch=False,
        milestones=[16000, 18000],
        gamma=0.1)
]

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001),
    clip_grad=dict(max_norm=35, norm_type=2))

default_hooks = dict(
    logger=dict(type='LoggerHook', interval=20),
    checkpoint=dict(by_epoch=False, interval=4000, max_keep_ckpts=5))
log_processor = dict(by_epoch=False)

visualizer = dict(
    vis_backends=[
        dict(type='LocalVisBackend'),
        dict(type='TensorboardVisBackend')
    ])

# for debug
# load_from = "work_dirs/soft-teacher_faster-rcnn_r50-caffe_fpn_80k_semi-dior/iter_10000.pth"
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/castdet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/castdet.png
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/remoteCLIP_embeddings_bgs_normalized.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/remoteCLIP_embeddings_bgs_normalized.npy
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/remoteCLIP_embeddings_normalized.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/remoteCLIP_embeddings_normalized.npy
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/results_hbb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/results_hbb.png
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/results_obb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/results_obb.png
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/toolbox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/toolbox.png
--------------------------------------------------------------------------------
/projects/CastDetv2/resources/vis_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/CastDetv2/resources/vis_result.png
--------------------------------------------------------------------------------
/projects/CastDetv2/run.sh:
--------------------------------------------------------------------------------
1 | DEVICES_ID=2
2 | exp="oriented-rcnn_r50-fpn_20k_visdronezsd_base-set"
3 |
4 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
5 | projects/CastDetv2/configs/$exp.py
6 |
7 | python projects/CastDetv2/tools/merge_weights.py \
8 | --clip_path checkpoints/RemoteCLIP-RN50.pt \
9 | --base_path work_dirs/$exp/iter_20000.pth \
10 | --save_path work_dirs/$exp/merged_castdet_init_iter20k.pth \
11 | --base_model faster-rcnn
12 |
13 |
14 | exp="visdrone_step2_castdet_12b_10k_oriented"
15 |
16 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
17 | projects/CastDetv2/configs/$exp.py
18 |
19 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
20 | projects/CastDetv2/configs/$exp.py \
21 | work_dirs/$exp/iter_10000.pth \
22 | --work-dir work_dirs/$exp/dior_test
23 |
--------------------------------------------------------------------------------
/projects/CastDetv2/tools/merge_weights.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- encoding: utf-8 -*-
3 | '''
4 | @File : merge_weights.py
5 | @Version : 1.0
6 | @Time : 2024/04/20 00:00:16
7 | @E-mail : daodao123@sjtu.edu.cn
8 | @Introduction : None
9 | '''
10 |
11 | import argparse
12 | import torch
13 |
def merge_weights(clip_path, base_path, save_path, base_model='soft-teacher',
                  target_model='castdet'):
    """Merge CLIP visual-encoder weights into a base detector checkpoint.

    Args:
        clip_path (str): Path to the CLIP checkpoint (a flat state dict).
        base_path (str): Path to the base detector checkpoint; its weights
            live under the ``'state_dict'`` key.
        save_path (str): Where to save the merged state dict.
        base_model (str): Layout of the base checkpoint. ``'soft-teacher'``
            keys are copied as-is; ``'faster-rcnn'`` keys are duplicated
            under ``teacher.`` and ``student.`` prefixes.
        target_model (str): ``'castdet'`` (teacher/student layout) or
            ``'vild'`` (keep the base state dict unchanged).
    """
    # map_location='cpu' so GPU-trained checkpoints load on CPU-only hosts.
    clip = torch.load(clip_path, map_location='cpu')
    base = torch.load(base_path, map_location='cpu')
    save_dict = {}
    if target_model == 'castdet':
        for k, v in base['state_dict'].items():
            if base_model == 'soft-teacher':
                save_dict[k] = v
            elif base_model == 'faster-rcnn':
                save_dict['teacher.' + k] = v
                save_dict['student.' + k] = v
    elif target_model == 'vild':
        save_dict = base['state_dict']

    # Keep only the CLIP image encoder ('visual.*'); text weights are
    # not needed by the detector.
    for k, v in clip.items():
        if k.startswith('visual.'):
            save_dict[k] = v

    print(save_dict.keys())
    torch.save(save_dict, save_path)
34 |
def main():
    """Parse CLI arguments and merge the two checkpoints."""
    parser = argparse.ArgumentParser(description="Merge weights from CLIP and a base detection model")
    parser.add_argument("--clip_path", type=str, required=True, help="Path to the CLIP model checkpoint")
    parser.add_argument("--base_path", type=str, required=True, help="Path to the base model checkpoint")
    parser.add_argument("--save_path", type=str, required=True, help="Path where the merged model will be saved")
    parser.add_argument("--base_model", type=str, default="soft-teacher", choices=["soft-teacher", "faster-rcnn"], help="Base model type: 'soft-teacher' or 'faster-rcnn'")
    parser.add_argument("--target_model", type=str, default="castdet", choices=["castdet", "vild"], help="Target model type: 'castdet' or 'vild'")

    args = parser.parse_args()
    # Bug fix: forward --target_model; it was parsed but never passed on,
    # so '--target_model vild' was silently ignored.
    merge_weights(args.clip_path, args.base_path, args.save_path,
                  args.base_model, args.target_model)


if __name__ == "__main__":
    main()
48 |
--------------------------------------------------------------------------------
/projects/GLIP/glip/__init__.py:
--------------------------------------------------------------------------------
# GLIP project modules; importing this package registers them with the
# mmrotate/mmdet registries.
from .atss_vlfusion_head import RotatedATSSVLFusionHead

__all__ = [
    'RotatedATSSVLFusionHead'
]
--------------------------------------------------------------------------------
/projects/GLIP/run.sh:
--------------------------------------------------------------------------------
1 | DEVICES_ID=3
2 |
3 | exp1="glip_atss_r50_a_fpn_dyhead_visdronezsd_base"
4 |
5 | # Step1: train base-detector
6 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
7 | projects/GLIP/configs/$exp1.py
8 |
9 | # Step2.1: pseudo-labeling
10 | exp2="glip_atss_r50_a_fpn_dyhead_visdronezsd_base_nwpu45_pseudo_labeling"
11 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
12 | projects/GLIP/configs/$exp2.py \
13 | work_dirs/$exp1/iter_20000.pth
14 |
15 | # Step2.2: merge predictions
16 | python projects/GroundingDINO/tools/merge_ovdg_preds.py \
17 | --ann_path data/NWPU-RESISC45/annotations/nwpu45_unlabeled_2.json \
18 | --pred_path work_dirs/$exp2/nwpu45_pseudo_labeling_2.bbox.json \
19 | --save_path work_dirs/$exp2/nwpu45_unlabeled_with_glip_pseudos_2.json
20 |
21 | cp work_dirs/$exp2/nwpu45_unlabeled_with_glip_pseudos_2.json data/NWPU-RESISC45/annotations/nwpu45_unlabeled_with_glip_pseudos_2.json
22 |
23 | # Step3: self-training
24 | exp3="glip_atss_r50_a_fpn_dyhead_visdronezsd_base_nwpu"
25 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
26 | projects/GLIP/configs/$exp3.py
27 |
28 | # Step4: test
29 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
30 | projects/GLIP/configs/$exp3.py \
31 | work_dirs/$exp3/iter_10000.pth \
32 | --work-dir work_dirs/$exp3/dior_test
33 |
--------------------------------------------------------------------------------
/projects/GroundingDINO/groundingdino/__init__.py:
--------------------------------------------------------------------------------
# Rotated GroundingDINO project modules; importing this package registers
# them with the mmrotate/mmdet registries.
from .grounding_dino import RotatedGroundingDINO
from .grounding_dino_head import RotatedGroundingDINOHead

__all__ = [
    'RotatedGroundingDINO', 'RotatedGroundingDINOHead'
]
--------------------------------------------------------------------------------
/projects/GroundingDINO/run.sh:
--------------------------------------------------------------------------------
1 | DEVICES_ID=1
2 |
3 | # Step1: train base-detector
4 | exp1="grounding_dino_swin-t_visdrone_base-set_adamw"
5 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
6 | projects/GroundingDINO/configs/$exp1.py
7 |
8 | # Step2.1: pseudo-labeling
9 | exp2="grounding_dino_swin-t_visdrone_base-set_adamw_nwpu45_pseudo_labeling"
10 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
11 | projects/GroundingDINO/configs/$exp2.py \
12 | work_dirs/$exp1/iter_20000.pth
13 |
14 | # Step2.2: merge predictions
15 | python projects/GroundingDINO/tools/merge_ovdg_preds.py \
16 | --ann_path data/NWPU-RESISC45/annotations/nwpu45_unlabeled_2.json \
17 | --pred_path work_dirs/$exp2/nwpu45_pseudo_labeling_2.bbox.json \
18 | --save_path work_dirs/$exp2/nwpu45_unlabeled_with_gdino_pseudos_swin-t_adamw_top1.json \
19 | --topk 1
20 |
21 | cp work_dirs/$exp2/nwpu45_unlabeled_with_gdino_pseudos_swin-t_adamw_top1.json data/NWPU-RESISC45/annotations/nwpu45_unlabeled_with_gdino_pseudos_swin-t_adamw_top1.json
22 |
23 | # Step3: self-training
24 | exp3="grounding_dino_swin-t_visdrone_base-set_adamw_nwpu45"
25 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
26 | projects/GroundingDINO/configs/$exp3.py \
27 | --work-dir work_dirs/$exp3
28 |
29 | # Step4: test
30 | CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
31 | projects/GroundingDINO/configs/$exp3.py \
32 | work_dirs/$exp3/iter_10000.pth \
33 | --work-dir work_dirs/$exp3/dior_test
34 |
--------------------------------------------------------------------------------
/projects/LSKNet/lsknet/__init__.py:
--------------------------------------------------------------------------------
# LSKNet backbone; importing this package registers it with the registries.
from .lsknet import LSKNet

__all__ = ['LSKNet']
4 |
--------------------------------------------------------------------------------
/projects/LabelStudio/backend_template/readme.md:
--------------------------------------------------------------------------------
1 | # Semi-automatic Object Detection Annotation with MMRotate and Label-Studio
2 |
3 | Please read the [Docs](../../docs/en/user_guides/label_studio.md) for more details.
4 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/RR360/configs360/.gitkeep
--------------------------------------------------------------------------------
/projects/RR360/configs360/_base_/default_runtime.py:
--------------------------------------------------------------------------------
default_scope = 'mmrotate'

# Hooks executed at fixed points of the training loop.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=5),
    param_scheduler=dict(type='ParamSchedulerHook'),
    # checkpoint=dict(type='CheckpointHook', interval=1),
    # Keep only the single best checkpoint, ranked by DOTA AP50.
    checkpoint=dict(
        type='CheckpointHook',
        interval=4,
        save_best=['dota/AP50'],
        rule='greater',
        max_keep_ckpts=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='mmdet.DetVisualizationHook'))

vis_backends = [
    dict(type='LocalVisBackend'),
    # dict(type='WandbVisBackend',
    # init_kwargs=dict(project='trbox'))
]

# Multi-process / distributed runtime settings.
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

visualizer = dict(
    type='RR360LocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)

log_level = 'INFO'
load_from = None
resume = False

# Import RR360 project modules so their registry entries are available.
custom_imports = dict(
    imports=[
        # 'mmcls.models',
        'projects.RR360.visualization',
        'projects.RR360.structures',
        'projects.RR360.datasets.transforms',
        'projects.RR360.evaluation',
        # 'projects.RR360.models'
    ],
    allow_failed_imports=False)
47 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/_base_/schedules/schedule_1x.py:
--------------------------------------------------------------------------------
# training schedule for 1x (12 epochs)
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# learning rate: 500-iter linear warmup, then step decay at epochs 8 and 11
param_scheduler = [
    dict(
        type='LinearLR', start_factor=1.0 / 3, by_epoch=False, begin=0,
        end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
    clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/_base_/schedules/schedule_3x.py:
--------------------------------------------------------------------------------
# training schedule for 3x (36 epochs); header previously said "1x"
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0 / 3,
        by_epoch=False,
        begin=0,
        end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=36,
        by_epoch=True,
        milestones=[24, 33],
        gamma=0.1)
]

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
    clip_grad=dict(max_norm=35, norm_type=2))
28 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/_base_/schedules/schedule_6x.py:
--------------------------------------------------------------------------------
1 | # training schedule for 1x
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=72, val_interval=6)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR',
10 | start_factor=1.0 / 3,
11 | by_epoch=False,
12 | begin=0,
13 | end=500),
14 | dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=72,
18 | by_epoch=True,
19 | milestones=[48, 66],
20 | gamma=0.1)
21 | ]
22 |
23 | # optimizer
24 | optim_wrapper = dict(
25 | type='OptimWrapper',
26 | optimizer=dict(
27 | type='SGD', lr=0.0025 / 2, momentum=0.9, weight_decay=0.0001),
28 | clip_grad=dict(max_norm=35, norm_type=2))
29 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/readme.md:
--------------------------------------------------------------------------------
1 | # configs for 360 detection
2 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/rotated_rtmdet_x3/_base_/default_runtime.py:
--------------------------------------------------------------------------------
default_scope = 'mmrotate'

# Hooks executed at fixed points of the training loop.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=5),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=12, max_keep_ckpts=3),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='mmdet.DetVisualizationHook'))

# Multi-process / distributed runtime settings.
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

vis_backends = [
    dict(type='LocalVisBackend'),
    # dict(type='WandbVisBackend', init_kwargs=dict(project='trbox'))
]

visualizer = dict(
    type='RR360LocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)

log_level = 'INFO'
load_from = None
resume = False

# Keep an exponential moving average of model weights during training.
custom_hooks = [
    dict(type='mmdet.NumClassCheckHook'),
    dict(
        type='EMAHook',
        ema_type='mmdet.ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49)
]

# Import RR360 project modules so their registry entries are available.
custom_imports = dict(
    imports=[
        # 'mmcls.models',
        'projects.RR360.visualization',
        'projects.RR360.structures',
        'projects.RR360.datasets.transforms',
        'projects.RR360.evaluation',
        # 'projects.RR360.models',
    ],
    allow_failed_imports=False)
50 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/rotated_rtmdet_x3/_base_/schedule_3x.py:
--------------------------------------------------------------------------------
# 3x schedule: 36 epochs total (3 * 12), validated every `interval` epochs.
max_epochs = 3 * 12
# Reference LR 0.004 scaled down by 16 — presumably for a smaller total
# batch size; confirm against the dataloader config.
base_lr = 0.004 / 16
interval = 6

train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=interval)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Learning rate: linear warmup over the first 1000 iterations, then
# cosine annealing over the second half of training down to 5% of base_lr.
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,  # warmup is counted in iterations
        begin=0,
        end=1000),
    dict(
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# Optimizer: AdamW; no weight decay on norm layers and biases.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
34 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/rotated_rtmdet_x3_r/_base_/default_runtime.py:
--------------------------------------------------------------------------------
# Default runtime config for the RR360 rotated_rtmdet_x3_r project.
default_scope = 'mmrotate'  # default registry scope for type-name lookups

# Standard MMEngine hook set; checkpoints saved at interval=12
# (epoch units by default), keeping only the last 3.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=5),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=12, max_keep_ckpts=3),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='mmdet.DetVisualizationHook'))

# 'fork' start method and opencv_num_threads=0 avoid dataloader deadlocks;
# NCCL backend for distributed training.
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

vis_backends = [
    dict(type='LocalVisBackend'),
    # dict(type='WandbVisBackend', init_kwargs=dict(project='trbox'))
]

# Project-local visualizer, registered via custom_imports below.
visualizer = dict(
    type='RR360LocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)

log_level = 'INFO'
load_from = None  # optional checkpoint path to initialize weights from
resume = False  # whether to resume from the latest checkpoint

custom_hooks = [
    dict(type='mmdet.NumClassCheckHook'),
    # EMA of model weights; priority 49 runs it just before NORMAL(50) hooks.
    dict(
        type='EMAHook',
        ema_type='mmdet.ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49)
]

# Import the RR360 project packages so their registered modules resolve.
custom_imports = dict(
    imports=[
        # 'mmcls.models',
        'projects.RR360.visualization',
        'projects.RR360.structures',
        'projects.RR360.datasets.transforms',
        'projects.RR360.evaluation',
        # 'projects.RR360.models',
    ],
    allow_failed_imports=False)
50 |
--------------------------------------------------------------------------------
/projects/RR360/configs360/rotated_rtmdet_x3_r/_base_/schedule_3x.py:
--------------------------------------------------------------------------------
# 3x schedule: 36 epochs total (3 * 12), validated every `interval` epochs.
max_epochs = 3 * 12
# Reference LR 0.004 scaled down by 16 — presumably for a smaller total
# batch size; confirm against the dataloader config.
base_lr = 0.004 / 16
interval = 6

train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=interval)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# Learning rate: linear warmup over the first 1000 iterations, then
# cosine annealing over the second half of training down to 5% of base_lr.
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,  # warmup is counted in iterations
        begin=0,
        end=1000),
    dict(
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# Optimizer: AdamW; no weight decay on norm layers and biases.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
34 |
--------------------------------------------------------------------------------
/projects/RR360/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import * # noqa: F401, F403
2 |
--------------------------------------------------------------------------------
/projects/RR360/datasets/transforms/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .transforms import RotateAutoBound
3 |
4 | __all__ = ['RotateAutoBound']
5 |
--------------------------------------------------------------------------------
/projects/RR360/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .functional import * # noqa: F401,F403
3 | from .metrics import * # noqa: F401,F403
4 |
--------------------------------------------------------------------------------
/projects/RR360/evaluation/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .mean_ap import eval_rbbox_head_map
3 |
4 | __all__ = ['eval_rbbox_head_map']
5 |
--------------------------------------------------------------------------------
/projects/RR360/evaluation/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dota_r360_metric import DOTAR360Metric
3 |
4 | __all__ = ['DOTAR360Metric']
5 |
--------------------------------------------------------------------------------
/projects/RR360/structures/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox import * # noqa: F401,F403
2 |
--------------------------------------------------------------------------------
/projects/RR360/structures/bbox/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | # from .rotated_boxes import RotatedBoxes, rbox2qbox
4 | from .rotated_boxes import RotatedBoxes
5 |
6 | __all__ = ['RotatedBoxes']
7 | # __all__ = [
8 | # 'RotatedBoxes', 'rbox2qbox'
9 | # ]
10 |
--------------------------------------------------------------------------------
/projects/RR360/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .local_visualizer import RR360LocalVisualizer
3 |
4 | __all__ = ['RR360LocalVisualizer']
5 |
--------------------------------------------------------------------------------
/projects/ViLD/resources/vild_framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/projects/ViLD/resources/vild_framework.png
--------------------------------------------------------------------------------
/projects/ViLD/run.sh:
--------------------------------------------------------------------------------
# End-to-end ViLD pipeline: train base model, merge CLIP weights,
# generate pseudo labels, self-train, and evaluate.
DEVICES_ID=0

# Step1: train the base model
exp1="oriented-rcnn_r50-fpn_20k_visdronezsd_base-set"
CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
    projects/ViLD/configs/$exp1.py

# Step2: merge weights
# FIX: the original was missing the trailing backslash after --save_path,
# so "--target_model vild" ran as a separate (failing) shell command and
# the merge script never received its target_model argument.
python projects/CastDetv2/tools/merge_weights.py \
    --clip_path checkpoints/RemoteCLIP-RN50.pt \
    --base_path work_dirs/$exp1/iter_20000.pth \
    --save_path work_dirs/$exp1/merged_vild_init_iter20k.pth \
    --target_model vild

# Step3: prepare pseudo labels
exp2="vild_oriented-rcnn_r50_fpn_visdronezsd_step1_prepare"
CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
    projects/ViLD/configs/$exp2.py \
    work_dirs/$exp1/merged_vild_init_iter20k.pth

# Step4: self-training
exp3="vild_oriented-rcnn_r50_fpn_visdronezsd_step2_finetune"
CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/train.py \
    projects/ViLD/configs/$exp3.py

# Step5: test
CUDA_VISIBLE_DEVICES=$DEVICES_ID python tools/test.py \
    projects/ViLD/configs/$exp3.py \
    work_dirs/$exp3/iter_10000.pth \
    --work-dir work_dirs/$exp3/dior_test
32 |
33 |
--------------------------------------------------------------------------------
/projects/ViLD/vild/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .rotated_vild import RotatedViLD
from .ovd_bbox_head import Shared2FCBBoxHeadZSD, Projection2
from .modified_resnet import ModifiedResNet2

# Public API of the ViLD project package.
# FIX: the original list was missing the comma after 'Projection2', so
# implicit string concatenation produced the bogus entry
# 'Projection2ModifiedResNet2' and left 'ModifiedResNet2' out of __all__.
__all__ = [
    'RotatedViLD', 'Shared2FCBBoxHeadZSD', 'Projection2',
    'ModifiedResNet2'
]
--------------------------------------------------------------------------------
/projects/example_project/configs/r3det-oc_dummy-resnet_fpn_1x_dota.py:
--------------------------------------------------------------------------------
1 | _base_ = ['../../../configs/r3det/r3det-oc_r50_fpn_1x_dota.py']
2 |
3 | custom_imports = dict(imports=['projects.example_project.dummy'])
4 |
5 | _base_.model.backbone.type = 'DummyResNet'
6 |
--------------------------------------------------------------------------------
/projects/example_project/dummy/__init__.py:
--------------------------------------------------------------------------------
1 | from .dummy_resnet import DummyResNet
2 |
3 | __all__ = ['DummyResNet']
4 |
--------------------------------------------------------------------------------
/projects/example_project/dummy/dummy_resnet.py:
--------------------------------------------------------------------------------
1 | from mmdet.models.backbones import ResNet
2 |
3 | from mmrotate.registry import MODELS
4 |
5 |
@MODELS.register_module()
class DummyResNet(ResNet):
    """Implements a dummy ResNet wrapper for demonstration purpose.

    Args:
        **kwargs: All the arguments are passed to the parent class.
    """

    def __init__(self, **kwargs) -> None:
        # Deliberate side effect: the printout proves the custom module was
        # imported and registered when the example config is loaded.
        print('Hello world!')
        super().__init__(**kwargs)
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/build.txt
2 | -r requirements/optional.txt
3 | -r requirements/runtime.txt
4 | -r requirements/tests.txt
5 |
--------------------------------------------------------------------------------
/requirements/build.txt:
--------------------------------------------------------------------------------
1 | # These must be installed before building mmrotate
2 | cython
3 | numpy
4 |
--------------------------------------------------------------------------------
/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | myst-parser
3 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
4 | sphinx==4.0.2
5 | sphinx-copybutton
6 | sphinx_markdown_tables
7 | sphinx_rtd_theme==0.5.2
8 |
--------------------------------------------------------------------------------
/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0rc2,<2.1.0
2 | mmdet>=3.0.0rc2,<3.2.0
3 | mmengine>=0.1.0
4 |
--------------------------------------------------------------------------------
/requirements/multimodal.txt:
--------------------------------------------------------------------------------
1 | fairscale
2 | jsonlines
3 | nltk
4 | pycocoevalcap
5 | transformers
6 |
--------------------------------------------------------------------------------
/requirements/optional.txt:
--------------------------------------------------------------------------------
1 | imagecorruptions
2 | scikit-learn
3 | scipy
4 |
--------------------------------------------------------------------------------
/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
1 | e2cnn
2 | mmcv>=2.0.0rc2
3 | mmdet>=3.0.0rc2
4 | mmengine>=0.1.0
5 | torch
6 | torchvision
7 |
--------------------------------------------------------------------------------
/requirements/runtime.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | numpy
3 | pycocotools
4 | six
5 | terminaltables
6 | torch
7 |
--------------------------------------------------------------------------------
/requirements/tests.txt:
--------------------------------------------------------------------------------
1 | asynctest
2 | codecov
3 | coverage
4 | cython
5 | -e git+https://github.com/QUVA-Lab/e2cnn.git#egg=e2cnn
6 | flake8
7 | interrogate
8 | isort==4.3.21
9 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future.
10 | kwarray
11 | matplotlib
12 | parameterized
13 | pytest
14 | scikit-learn
15 | ubelt
16 | wheel
17 | xdoctest>=0.10.0
18 | yapf
19 |
--------------------------------------------------------------------------------
/resources/mmrotate-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/resources/mmrotate-logo.png
--------------------------------------------------------------------------------
/resources/qq_group_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/resources/qq_group_qrcode.jpg
--------------------------------------------------------------------------------
/resources/zhihu_qrcode.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/resources/zhihu_qrcode.jpg
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | line_length = 79
3 | multi_line_output = 0
4 | known_standard_library = setuptools
5 | known_first_party = mmrotate
6 | known_third_party = PIL,boto3,botocore,cv2,e2cnn,label_studio_ml,label_studio_tools,matplotlib,mmcv,mmdet,mmengine,numpy,parameterized,pycocotools,pytest,pytorch_sphinx_theme,terminaltables,torch,torchvision,ts,yaml
7 | no_lines_before = STDLIB,LOCALFOLDER
8 | default_section = THIRDPARTY
9 |
10 | [yapf]
11 | BASED_ON_STYLE = pep8
12 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
13 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
14 |
15 | [codespell]
16 | skip = *.ipynb
17 | quiet-level = 3
18 | ignore-words-list = DOTA,dota,alse,warmup
19 |
--------------------------------------------------------------------------------
/tests/data/dior/Annotations/Oriented Bounding Boxes/00001.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | /JPEGImages-trainval.zip/JPEGImages-trainval/JPEGImages-trainval
4 | 00001.jpg
5 | /JPEGImages-trainval.zip/JPEGImages-trainval/JPEGImages-trainval
6 |
7 | Unknown
8 |
9 |
10 | 800
11 | 800
12 | 3
13 |
14 | 0
15 |
33 |
34 |
--------------------------------------------------------------------------------
/tests/data/dior/JPEGImage/00001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/tests/data/dior/JPEGImage/00001.jpg
--------------------------------------------------------------------------------
/tests/data/dior/demo.txt:
--------------------------------------------------------------------------------
1 | 00001
2 |
--------------------------------------------------------------------------------
/tests/data/dota/images/P2805__1024__0___0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VisionXLab/CastDet/32e67b53bf739208a69615c91b2789f1be109536/tests/data/dota/images/P2805__1024__0___0.png
--------------------------------------------------------------------------------
/tests/data/dota/labelTxt/P2805__1024__0___0.txt:
--------------------------------------------------------------------------------
1 | 359.0 663.0 369.0 497.0 543.0 509.0 531.0 677.0 plane 0
2 | 540.0 884.0 363.0 862.0 392.0 674.0 570.0 695.0 plane 0
3 | 788.0 844.0 734.0 701.0 916.0 631.0 970.0 762.0 plane 0
4 | 720.0 726.0 668.0 583.0 852.0 494.0 913.0 636.0 plane 0
5 |
--------------------------------------------------------------------------------
/tests/data/hrsc/FullDataSet/Annotations/100000006.xml:
--------------------------------------------------------------------------------
1 |
2 | 100000006
3 | 100000001
4 | 100000001
5 | 100000006
6 | 100000006
7 | bmp
8 | 1900-01-01
9 | sealand
10 |
11 |
12 | 69.040297,33.070036
13 | 1172
14 | 816
15 | 3
16 | 1.07
17 | 18
18 | 100
19 |
20 |
21 | 0
22 | 0
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | 000d
34 | 1
35 |
36 |
37 | 100000006
38 | 100000013
39 | 100000006
40 | 0
41 | 0
42 | 119
43 | 75
44 | 587
45 | 789
46 | 341.2143
47 | 443.3325
48 | 778.4297
49 | 178.2595
50 | -1.122944
51 | 0
52 |
53 |
54 | 143
55 | 776
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/tests/data/hrsc/demo.txt:
--------------------------------------------------------------------------------
1 | 100000006
2 |
--------------------------------------------------------------------------------
/tests/test_apis/test_inference.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import os.path as osp
3 | from unittest import TestCase
4 |
5 | import numpy as np
6 | import pytest
7 | import torch
8 | from mmdet.apis import init_detector
9 | from mmdet.structures import DetDataSample
10 | from parameterized import parameterized
11 |
12 | from mmrotate.apis import inference_detector_by_patches
13 | from mmrotate.utils import register_all_modules
14 |
15 |
class TestInferenceDetectorByPatches(TestCase):
    """Smoke test for patch-based (sliding-window) rotated inference."""

    def setUp(self):
        # Register mmrotate modules so config type names resolve.
        register_all_modules()

    @parameterized.expand([
        ('rotated_retinanet/rotated-retinanet-rbox-le90_r50_fpn_1x_dota.py',
         ('cuda', )),
    ])
    def test_inference_detector_by_patches(self, config, devices):
        assert all([device in ['cpu', 'cuda'] for device in devices])

        # Resolve the repository root relative to this test file.
        project_dir = osp.abspath(osp.dirname(osp.dirname(__file__)))
        project_dir = osp.join(project_dir, '..')

        config_file = osp.join(project_dir, 'configs', config)

        # Seeded random image so results are reproducible; 125px image with
        # 75px patches and 50px stride exercises the patching logic.
        rng = np.random.RandomState(0)
        img = rng.randint(0, 255, (125, 125, 3), dtype=np.uint8)

        for device in devices:
            if device == 'cuda' and not torch.cuda.is_available():
                pytest.skip('test requires GPU and torch+cuda')

            model = init_detector(config_file, device=device)
            nms_cfg = dict(type='nms_rotated', iou_threshold=0.1)
            # sizes=[75], steps=[50], ratios=[1.0]
            result = inference_detector_by_patches(model, img, [75], [50],
                                                   [1.0], nms_cfg)
            assert isinstance(result, DetDataSample)
46 |
--------------------------------------------------------------------------------
/tests/test_datasets/test_dior.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import unittest
3 |
4 | from mmrotate.datasets import DIORDataset
5 |
6 |
class TestDIORDataset(unittest.TestCase):
    """Loads the bundled one-image DIOR fixture and checks its metadata."""

    def test_dior(self):
        # NOTE(review): the fixture directory in tests/data/dior is named
        # 'JPEGImage' (no trailing 's') while the prefix below is
        # 'JPEGImages/'. The dataset apparently does not verify file
        # existence at load time, so the test still passes — confirm which
        # spelling is intended.
        dataset = DIORDataset(
            data_root='tests/data/dior/',
            ann_file='demo.txt',
            data_prefix=dict(img_path='JPEGImages/'),
            filter_cfg=dict(
                filter_empty_gt=True, min_size=32, bbox_min_size=4),
            pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 1)

        data_list = dataset.load_data_list()
        self.assertEqual(len(data_list), 1)
        self.assertEqual(data_list[0]['img_id'], '00001')
        self.assertEqual(data_list[0]['img_path'].replace('\\', '/'),
                         'tests/data/dior/JPEGImages/00001.jpg')
        self.assertEqual(
            data_list[0]['xml_path'].replace('\\', '/'),
            'tests/data/dior/Annotations/Oriented Bounding Boxes/00001.xml')
        self.assertEqual(len(data_list[0]['instances']), 1)
        self.assertEqual(dataset.get_cat_ids(0), [9])
        self.assertEqual(len(dataset._metainfo['classes']), 20)
31 |
--------------------------------------------------------------------------------
/tests/test_models/test_roi_heads/test_bbox_head/test_gv_bbox_head.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import unittest
3 | from unittest import TestCase
4 |
5 | import torch
6 | from parameterized import parameterized
7 |
8 | from mmrotate.models.roi_heads.bbox_heads import GVBBoxHead
9 | from mmrotate.utils import register_all_modules
10 |
11 |
class TestGVBBoxHead(TestCase):
    """Forward-pass smoke test for the Gliding Vertex bbox head."""

    def setUp(self):
        register_all_modules()

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                # FIX: the original did `return unittest.skip(...)`, which
                # only returns a decorator object, so the test was reported
                # as PASSED instead of SKIPPED. skipTest() raises SkipTest.
                self.skipTest('test requires GPU and torch+cuda')

        bbox_head = GVBBoxHead(
            in_channels=1,
            fc_out_channels=1,
            roi_feat_size=7,
            num_classes=4,
            ratio_thr=0.8,
            bbox_coder=dict(
                type='DeltaXYWHQBBoxCoder',
                target_means=(.0, .0, .0, .0),
                target_stds=(0.1, 0.1, 0.2, 0.2)),
            fix_coder=dict(type='GVFixCoder'),
            ratio_coder=dict(type='GVRatioCoder'),
            predict_box_type='rbox',
            reg_class_agnostic=True,
            loss_cls=dict(
                type='mmdet.CrossEntropyLoss',
                use_sigmoid=False,
                loss_weight=1.0),
            loss_bbox=dict(
                type='mmdet.SmoothL1Loss', beta=1.0, loss_weight=1.0),
            loss_fix=dict(
                type='mmdet.SmoothL1Loss', beta=1.0 / 3.0, loss_weight=1.0),
            loss_ratio=dict(
                type='mmdet.SmoothL1Loss', beta=1.0 / 3.0, loss_weight=16.0))
        bbox_head = bbox_head.to(device=device)

        # Forward only (no loss): 4 RoI features of shape (1, 7, 7).
        num_samples = 4
        feats = torch.rand((num_samples, 1, 7, 7)).to(device)
        bbox_head(x=feats)
52 |
--------------------------------------------------------------------------------
/tests/test_models/test_task_modules/test_assigners/test_convex_assigner.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import unittest
3 |
4 | import torch
5 | from mmengine.structures import InstanceData
6 | from mmengine.testing import assert_allclose
7 |
8 | from mmrotate.models.task_modules.assigners import ConvexAssigner
9 |
10 |
class TestConvexAssigner(unittest.TestCase):
    """CUDA-only test of ConvexAssigner on hand-built 9-point priors."""

    def test_convex_assigner(self):
        if not torch.cuda.is_available():
            # FIX: the original did `return unittest.skip(...)`, which only
            # returns a decorator object, so the test was reported as PASSED
            # instead of SKIPPED. skipTest() raises SkipTest properly.
            self.skipTest('test requires GPU and torch+cuda')

        assigner = ConvexAssigner(scale=4, pos_num=1)
        # Each prior is 9 (x, y) points (18 values) forming a convex region.
        priors = torch.FloatTensor([
            [0, 0, 2, 2, 5, 5, 10, 10, 10, 5, 10, 0, 8, 0, 5, 0, 2, 0],
            [10, 0, 12, 2, 15, 5, 20, 10, 20, 5, 20, 0, 18, 0, 15, 0, 12, 0],
            [
                10, 10, 12, 12, 15, 15, 20, 20, 20, 15, 20, 10, 18, 10, 15, 10,
                12, 10
            ],
            [
                12, 10, 14, 12, 17, 15, 22, 20, 22, 15, 22, 10, 20, 10, 17, 10,
                14, 10
            ],
        ]).cuda()
        # Ground truths are quadrilaterals (4 corner points).
        gt_bboxes = torch.FloatTensor([
            [5, 5, 10, 5, 10, 10, 5, 10],
            [5, 15, 15, 15, 15, 20, 5, 20],
        ]).cuda()
        gt_labels = torch.LongTensor([2, 3]).cuda()

        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)

        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)

        # Only the first prior should match gt #1; the rest are background.
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0]).cuda()
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
45 |
--------------------------------------------------------------------------------
/tests/test_models/test_task_modules/test_assigners/test_max_convex_iou_assigner.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import unittest
3 |
4 | import torch
5 | from mmengine.structures import InstanceData
6 | from parameterized import parameterized
7 |
8 | from mmrotate.models.task_modules.assigners import MaxConvexIoUAssigner
9 |
10 |
class TestMaxConvexIoUAssigner(unittest.TestCase):
    """CUDA-only test of MaxConvexIoUAssigner with scalar/tuple neg thr."""

    @parameterized.expand([(0.5, ), ((0, 0.5), )])
    def test_max_convex_iou_assigner(self, neg_iou_thr):

        if not torch.cuda.is_available():
            # FIX: the original did `return unittest.skip(...)`, which only
            # returns a decorator object, so the test was reported as PASSED
            # instead of SKIPPED. skipTest() raises SkipTest properly.
            self.skipTest('test requires GPU and torch+cuda')

        assigner = MaxConvexIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=neg_iou_thr,
        )
        # Each prior is 9 (x, y) points (18 values) forming a convex region.
        priors = torch.FloatTensor([
            [0, 0, 2, 2, 5, 5, 10, 10, 10, 5, 10, 0, 8, 0, 5, 0, 2, 0],
            [10, 0, 12, 2, 15, 5, 20, 10, 20, 5, 20, 0, 18, 0, 15, 0, 12, 0],
            [
                10, 10, 12, 12, 15, 15, 20, 20, 20, 15, 20, 10, 18, 10, 15, 10,
                12, 10
            ],
            [
                12, 10, 14, 12, 17, 15, 22, 20, 22, 15, 22, 10, 20, 10, 17, 10,
                14, 10
            ],
        ]).cuda()
        # Ground truths in (cx, cy, w, h, angle) rotated-box format.
        gt_bboxes = torch.FloatTensor([
            [5, 5, 5, 4, 0.1],
            [5, 15, 5, 5, 0.0],
        ]).cuda()
        gt_labels = torch.LongTensor([2, 3]).cuda()

        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)

        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
47 |
--------------------------------------------------------------------------------
/tests/test_models/test_task_modules/test_coder/test_angle_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from unittest import TestCase
3 |
4 | import torch
5 | from mmengine.testing import assert_allclose
6 |
7 | from mmrotate.models.task_modules.coders import CSLCoder
8 |
9 |
class TestCSLCoder(TestCase):
    """Round-trip checks for the Circular Smooth Label angle coder."""

    def test_encode(self):
        # 'oc' range with omega=10 yields 90/10 = 9 label bins.
        coder = CSLCoder(angle_version='oc', omega=10)
        angle_preds = torch.Tensor([[0.]])
        expected_encode_angles = torch.Tensor(
            [[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        out = coder.encode(angle_preds)
        assert_allclose(expected_encode_angles, out)

    def test_decode(self):
        coder = CSLCoder(angle_version='oc', omega=10)
        encode_angles = torch.Tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        out = coder.decode(encode_angles)
        # Only the output shape is checked here (one angle per row).
        assert_allclose([1], out.shape)
25 |
--------------------------------------------------------------------------------
/tests/test_models/test_task_modules/test_coder/test_delta_xywh_hbbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from unittest import TestCase
3 |
4 | import torch
5 | from mmdet.structures.bbox import HorizontalBoxes
6 | from mmengine.testing import assert_allclose
7 |
8 | from mmrotate.models.task_modules.coders import DeltaXYWHHBBoxCoder
9 | from mmrotate.structures.bbox import RotatedBoxes
10 |
11 |
class TestDeltaBboxCoder(TestCase):
    """Checks DeltaXYWHHBBoxCoder encoding of rotated GTs vs hbox proposals."""

    def test_encode(self):
        coder = DeltaXYWHHBBoxCoder()

        # Proposals in (x1, y1, x2, y2); GTs in (cx, cy, w, h, angle).
        proposals = torch.Tensor([[0., 0., 1., 1.], [0., 0., 2., 2.],
                                  [0., 0., 5., 5.], [5., 5., 10., 10.]])
        gt = torch.Tensor([[0., 0., 1., 1., 0.], [0., 0., 1., 1., 0.1],
                           [0., 0., 1., 1., 0.1], [5., 5., 5., 5., 0.3]])

        # Expected (dx, dy, dw, dh) deltas (precomputed fixture).
        expected_encode_bboxes = torch.Tensor(
            [[-0.5000, -0.5000, 0.0000, 0.0000],
             [-0.5000, -0.5000, -0.6025, -0.6025],
             [-0.5000, -0.5000, -1.5188, -1.5188],
             [-0.5000, -0.5000, 0.2238, 0.2238]])

        out = coder.encode(HorizontalBoxes(proposals), RotatedBoxes(gt))
        assert_allclose(expected_encode_bboxes, out)
30 |
--------------------------------------------------------------------------------
/tests/test_models/test_task_modules/test_coder/test_delta_xywh_qbbox_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from unittest import TestCase
3 |
4 | import torch
5 | from mmdet.structures.bbox import HorizontalBoxes
6 | from mmengine.testing import assert_allclose
7 |
8 | from mmrotate.models.task_modules.coders import DeltaXYWHQBBoxCoder
9 | from mmrotate.structures.bbox import QuadriBoxes
10 |
11 |
class TestDeltaXYWHQBBoxCoder(TestCase):
    """Checks DeltaXYWHQBBoxCoder encoding of quad GTs vs hbox proposals."""

    def test_encode(self):
        coder = DeltaXYWHQBBoxCoder()

        # Proposals in (x1, y1, x2, y2); GTs as 4-corner quadrilaterals.
        proposals = torch.Tensor([[0., 0., 1., 1.], [0., 0., 2., 2.],
                                  [0., 0., 5., 5.], [5., 5., 10., 10.]])
        gt = torch.Tensor([[0., 0., 1., 0., 1., 1., 0., 1.],
                           [0.1, 0., 1.1, 0., 1.1, 1., 0.1, 1.],
                           [0., 0.1, 1., 0.1, 1., 1.1, 0., 1.1],
                           [0.1, 0.1, 1.1, 0.1, 1.1, 1.1, 0.1, 1.1]])

        # Expected (dx, dy, dw, dh) deltas (precomputed fixture).
        expected_encode_bboxes = torch.Tensor(
            [[0.0000, 0.0000, 0.0000, 0.0000],
             [-0.2000, -0.2500, -0.6931, -0.6931],
             [-0.4000, -0.3800, -1.6094, -1.6094],
             [-1.3800, -1.3800, -1.6094, -1.6094]])

        out = coder.encode(HorizontalBoxes(proposals), QuadriBoxes(gt))
        assert_allclose(expected_encode_bboxes, out)
32 |
--------------------------------------------------------------------------------
/tests/test_visualization/test_palette.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmrotate.datasets import DOTADataset
3 | from mmrotate.visualization import get_palette
4 |
5 |
def test_palette():
    """Exercise get_palette with list/tuple/str/dataset/random inputs."""

    # test list: an explicit palette is returned unchanged
    palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    palette_ = get_palette(palette, 3)
    for color, color_ in zip(palette, palette_):
        assert color == color_

    # test tuple: a single color is replicated num_classes times
    palette = get_palette((1, 2, 3), 3)
    assert len(palette) == 3
    for color in palette:
        assert color == (1, 2, 3)

    # test color str: named colors resolve to RGB
    palette = get_palette('red', 3)
    assert len(palette) == 3
    for color in palette:
        assert color == (255, 0, 0)

    # test dataset str: 'dota' maps to the DOTA class palette
    palette = get_palette('dota', len(DOTADataset.METAINFO['classes']))
    assert len(palette) == len(DOTADataset.METAINFO['classes'])
    assert palette[0] == (165, 42, 42)

    # test random: 'random' and None are equivalent and deterministic
    palette1 = get_palette('random', 3)
    palette2 = get_palette(None, 3)
    for color1, color2 in zip(palette1, palette2):
        assert isinstance(color1, tuple)
        assert isinstance(color2, tuple)
        assert color1 == color2
38 |
--------------------------------------------------------------------------------
/tools/data/README.md:
--------------------------------------------------------------------------------
1 | # Data Preparation for Rotation Detection
2 |
3 | It is recommended to symlink the dataset root to `$MMROTATE/data`.
4 | If your folder structure is different, you may need to change the corresponding paths in config files.
5 |
6 | Datasets supported in MMRotate:
7 |
8 | - [DOTA Dataset](dota/README.md) \[ [Homepage](https://captain-whu.github.io/DOTA/) \]
9 | - [DIOR Dataset](dior/README.md) \[ [Homepage](https://gcheng-nwpu.github.io/#Datasets) \]
10 | - [SSDD Dataset](ssdd/README.md)
11 | - [HRSC Dataset](hrsc/README.md)
12 | - [HRSID Dataset](hrsid/README.md)
13 | - [SRSDD Dataset](srsdd/README.md)
14 | - [RSDD Dataset](rsdd/README.md)
15 |
--------------------------------------------------------------------------------
/tools/data/dior/README.md:
--------------------------------------------------------------------------------
1 | # Preparing DIOR Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @article{LI2020296,
7 | title = {Object detection in optical remote sensing images: A survey and a new benchmark},
8 | journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
9 | volume = {159},
10 | pages = {296-307},
11 | year = {2020},
12 | issn = {0924-2716},
13 | doi = {https://doi.org/10.1016/j.isprsjprs.2019.11.023},
14 | url = {https://www.sciencedirect.com/science/article/pii/S0924271619302825},
15 | author = {Ke Li and Gang Wan and Gong Cheng and Liqiu Meng and Junwei Han}
}
16 | ```
17 |
18 | ## Download DIOR dataset
19 |
20 | The DIOR dataset can be downloaded from [here](https://gcheng-nwpu.github.io/#Datasets).
21 |
22 | The data structure is as follows:
23 |
24 | ```none
25 | mmrotate
26 | ├── mmrotate
27 | ├── tools
28 | ├── configs
29 | ├── data
30 | │ ├── DIOR
31 | │ │ ├── JPEGImages-trainval
32 | │ │ ├── JPEGImages-test
33 | │ │ ├── Annotations
34 | │ │ │ ├─ Oriented Bounding Boxes
35 | │ │ │ ├─ Horizontal Bounding Boxes
36 | │ │ ├── ImageSets
37 | │ │ │ ├─ Main
38 | │ │ │ │ ├─ train.txt
39 | │ │ │ │ ├─ val.txt
40 | │ │ │ │ ├─ test.txt
41 | ```
42 |
43 | ## Change base config
44 |
45 | Please change `data_root` in `configs/_base_/datasets/dior.py` to `data/DIOR/`.
46 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ms_test.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/test/images/"
5 | ],
6 | "ann_dirs": null,
7 | "sizes": [
8 | 1024
9 | ],
10 | "gaps": [
11 | 500
12 | ],
13 | "rates": [
14 | 0.5,
15 | 1.0,
16 | 1.5
17 | ],
18 | "img_rate_thr": 0.6,
19 | "iof_thr": 0.7,
20 | "no_padding": false,
21 | "padding_value": [
22 | 104,
23 | 116,
24 | 124
25 | ],
26 | "save_dir": "data/split_ms_dota/test/",
27 | "save_ext": ".png"
28 | }
29 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ms_train.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/train/images/"
5 | ],
6 | "ann_dirs": [
7 | "data/DOTA/train/labelTxt/"
8 | ],
9 | "sizes": [
10 | 1024
11 | ],
12 | "gaps": [
13 | 500
14 | ],
15 | "rates": [
16 | 0.5,
17 | 1.0,
18 | 1.5
19 | ],
20 | "img_rate_thr": 0.6,
21 | "iof_thr": 0.7,
22 | "no_padding": false,
23 | "padding_value": [
24 | 104,
25 | 116,
26 | 124
27 | ],
28 | "save_dir": "data/split_ms_dota/train/",
29 | "save_ext": ".png"
30 | }
31 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ms_trainval.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/train/images/",
5 | "data/DOTA/val/images/"
6 | ],
7 | "ann_dirs": [
8 | "data/DOTA/train/labelTxt/",
9 | "data/DOTA/val/labelTxt/"
10 | ],
11 | "sizes": [
12 | 1024
13 | ],
14 | "gaps": [
15 | 500
16 | ],
17 | "rates": [
18 | 0.5,
19 | 1.0,
20 | 1.5
21 | ],
22 | "img_rate_thr": 0.6,
23 | "iof_thr": 0.7,
24 | "no_padding": false,
25 | "padding_value": [
26 | 104,
27 | 116,
28 | 124
29 | ],
30 | "save_dir": "data/split_ms_dota/trainval/",
31 | "save_ext": ".png"
32 | }
33 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ms_val.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/val/images/"
5 | ],
6 | "ann_dirs": [
7 | "data/DOTA/val/labelTxt/"
8 | ],
9 | "sizes": [
10 | 1024
11 | ],
12 | "gaps": [
13 | 500
14 | ],
15 | "rates": [
16 | 0.5,
17 | 1.0,
18 | 1.5
19 | ],
20 | "img_rate_thr": 0.6,
21 | "iof_thr": 0.7,
22 | "no_padding": false,
23 | "padding_value": [
24 | 104,
25 | 116,
26 | 124
27 | ],
28 | "save_dir": "data/split_ms_dota/val/",
29 | "save_ext": ".png"
30 | }
31 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ss_test.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/test/images/"
5 | ],
6 | "ann_dirs": null,
7 | "sizes": [
8 | 1024
9 | ],
10 | "gaps": [
11 | 200
12 | ],
13 | "rates": [
14 | 1.0
15 | ],
16 | "img_rate_thr": 0.6,
17 | "iof_thr": 0.7,
18 | "no_padding": false,
19 | "padding_value": [
20 | 104,
21 | 116,
22 | 124
23 | ],
24 | "save_dir": "data/split_ss_dota/test/",
25 | "save_ext": ".png"
26 | }
27 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ss_train.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/train/images/"
5 | ],
6 | "ann_dirs": [
7 | "data/DOTA/train/labelTxt/"
8 | ],
9 | "sizes": [
10 | 1024
11 | ],
12 | "gaps": [
13 | 200
14 | ],
15 | "rates": [
16 | 1.0
17 | ],
18 | "img_rate_thr": 0.6,
19 | "iof_thr": 0.7,
20 | "no_padding": false,
21 | "padding_value": [
22 | 104,
23 | 116,
24 | 124
25 | ],
26 | "save_dir": "data/split_ss_dota/train/",
27 | "save_ext": ".png"
28 | }
29 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ss_trainval.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/train/images/",
5 | "data/DOTA/val/images/"
6 | ],
7 | "ann_dirs": [
8 | "data/DOTA/train/labelTxt/",
9 | "data/DOTA/val/labelTxt/"
10 | ],
11 | "sizes": [
12 | 1024
13 | ],
14 | "gaps": [
15 | 200
16 | ],
17 | "rates": [
18 | 1.0
19 | ],
20 | "img_rate_thr": 0.6,
21 | "iof_thr": 0.7,
22 | "no_padding": false,
23 | "padding_value": [
24 | 104,
25 | 116,
26 | 124
27 | ],
28 | "save_dir": "data/split_ss_dota/trainval/",
29 | "save_ext": ".png"
30 | }
31 |
--------------------------------------------------------------------------------
/tools/data/dota/split/split_configs/ss_val.json:
--------------------------------------------------------------------------------
1 | {
2 | "nproc": 10,
3 | "img_dirs": [
4 | "data/DOTA/val/images/"
5 | ],
6 | "ann_dirs": [
7 | "data/DOTA/val/labelTxt/"
8 | ],
9 | "sizes": [
10 | 1024
11 | ],
12 | "gaps": [
13 | 200
14 | ],
15 | "rates": [
16 | 1.0
17 | ],
18 | "img_rate_thr": 0.6,
19 | "iof_thr": 0.7,
20 | "no_padding": false,
21 | "padding_value": [
22 | 104,
23 | 116,
24 | 124
25 | ],
26 | "save_dir": "data/split_ss_dota/val/",
27 | "save_ext": ".png"
28 | }
29 |
--------------------------------------------------------------------------------
/tools/data/hrsc/README.md:
--------------------------------------------------------------------------------
1 | # Preparing HRSC Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @conference{hrsc,
7 | author = {Zikun Liu. and Liu Yuan. and Lubin Weng. and Yiping Yang.},
8 | title = {A High Resolution Optical Satellite Image Dataset for Ship Recognition and Some New Baselines},
9 | booktitle = {Proceedings of the 6th International Conference on Pattern Recognition Applications and Methods - ICPRAM,},
10 | year = {2017},
11 | pages = {324-331},
12 | publisher = {SciTePress},
13 | organization = {INSTICC},
14 | doi = {10.5220/0006120603240331},
15 | isbn = {978-989-758-222-6},
16 | issn = {2184-4313},
17 | }
18 | ```
19 |
20 | ## Download HRSC dataset
21 |
22 | The HRSC dataset can be downloaded from [here](https://aistudio.baidu.com/aistudio/datasetdetail/54106).
23 |
24 | The data structure is as follows:
25 |
26 | ```none
27 | mmrotate
28 | ├── mmrotate
29 | ├── tools
30 | ├── configs
31 | ├── data
32 | │ ├── hrsc
33 | │ │ ├── FullDataSet
34 | │ │ │ ├─ AllImages
35 | │ │ │ ├─ Annotations
36 | │ │ │ ├─ LandMask
37 | │ │ │ ├─ Segmentations
38 | │ │ ├── ImageSets
39 | ```
40 |
41 | ## Change base config
42 |
43 | Please change `data_root` in `configs/_base_/datasets/hrsc.py` to `data/hrsc/`.
44 |
--------------------------------------------------------------------------------
/tools/data/hrsid/README.md:
--------------------------------------------------------------------------------
1 | # Preparing HRSID Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @ARTICLE{HRSID_2020,
7 | author={Wei, Shunjun and Zeng, Xiangfeng and Qu, Qizhe and Wang, Mou and Su, Hao and Shi, Jun},
8 | journal={IEEE Access},
9 | title={HRSID: A High-Resolution SAR Images Dataset for Ship Detection and Instance Segmentation},
10 | year={2020},
11 | volume={8},
12 | pages={120234-120254},
13 | }
14 | ```
15 |
16 | ## Download HRSID dataset
17 |
18 | The HRSID dataset can be downloaded from [Google drive](https://drive.google.com/file/d/1BZTU8Gyg20wqHXtBPFzRazn_lEdvhsbE/view).
19 |
20 | The data structure is as follows:
21 |
22 | ```none
23 | mmrotate
24 | ├── mmrotate
25 | ├── tools
26 | ├── configs
27 | ├── data
28 | │ ├── HRSID_JPG
29 | │ │ ├── JPEGImages
30 | │ │ ├── annotations
31 | ```
32 |
33 | ## Change base config
34 |
35 | Please change `data_root` in `configs/_base_/datasets/hrsid.py` to `data/HRSID_JPG/`.
36 |
--------------------------------------------------------------------------------
/tools/data/rsdd/README.md:
--------------------------------------------------------------------------------
1 | # Preparing RSDD Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @ARTICLE{RSDD2022,
7 | author = {C. Xu, H. Su, J. Li, Y. Liu, L. Yao, L. Gao, W. Yan and T. Wang},
8 | title = {RSDD-SAR: Rotated Ship Detection Dataset in SAR Images},
9 | journal = {Journal of Radars},
10 | month = {Sep.},
11 | year = {2022},
12 | volume={11},
13 | number={R22007},
14 | pages={581},
15 | }
16 | ```
17 |
18 | ## Download RSDD dataset
19 |
20 | The RSDD dataset can be downloaded from [Google drive](https://drive.google.com/file/d/1PJxr7Tbr_ZAzuG8MNloDa4mLaRYCD3qc/view?usp=sharing).
21 |
22 | The data structure is as follows:
23 |
24 | ```none
25 | mmrotate
26 | ├── mmrotate
27 | ├── tools
28 | ├── configs
29 | ├── data
30 | │ ├── rsdd
31 | │ │ ├── Annotations
32 | │ │ ├── ImageSets
33 | │ │ ├── JPEGImages
34 | │ │ ├── JPEGValidation
35 | ```
36 |
37 | ## Change base config
38 |
39 | Please change `data_root` in `configs/_base_/datasets/rsdd.py` to `data/rsdd/`.
40 |
--------------------------------------------------------------------------------
/tools/data/srsdd/README.md:
--------------------------------------------------------------------------------
1 | # Preparing SRSDD Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @ARTICLE{SRSDD2021,
7 | author = {S. Lei, D. Lu and X. Qiu},
8 | title = {SRSDD-v1.0: A high-resolution SAR rotation ship
9 | detection dataset},
10 |   journal = {Remote Sensing},
11 | month = {Dec.},
12 | year = {2021},
13 | volume={13},
14 | number={24},
15 | pages={5104},
16 | }
17 | ```
18 |
19 | ## Download SRSDD dataset
20 |
21 | The SRSDD dataset can be downloaded from [Google drive](https://drive.google.com/file/d/1QtCjih1ChOmG-TOPUTlsL3WbMh0L-1zp/view?usp=sharing).
22 |
23 | The data structure is as follows:
24 |
25 | ```none
26 | mmrotate
27 | ├── mmrotate
28 | ├── tools
29 | ├── configs
30 | ├── data
31 | │ ├── srsdd
32 | │ │ ├── train
33 | │ │ ├── test
34 | ```
35 |
36 | ## Change base config
37 |
38 | Please change `data_root` in `configs/_base_/datasets/srsdd.py` to `data/srsdd/`.
39 |
--------------------------------------------------------------------------------
/tools/data/ssdd/README.md:
--------------------------------------------------------------------------------
1 | # Preparing SSDD Dataset
2 |
3 |
4 |
5 | ```bibtex
6 | @ARTICLE{SSDD2021,
7 | author = {T. Zhang, X. Zhang, J. Li and X. Xu},
8 | title = {SAR ship detection dataset (SSDD): Official release and comprehensive data analysis},
9 | journal = {Remote Sensing},
10 | month = {Sep.},
11 | year = {2021},
12 | volume={13},
13 | number={18},
14 | pages={3690},
15 | }
16 | ```
17 |
18 | ## Download SSDD dataset
19 |
20 | The SSDD dataset can be downloaded from [Google drive](https://drive.google.com/file/d/1LmoHBk4xUvm0Zdtm8X7256dHigyFW4Nh/view?usp=sharing).
21 |
22 | The data structure is as follows:
23 |
24 | ```none
25 | mmrotate
26 | ├── mmrotate
27 | ├── tools
28 | ├── configs
29 | ├── data
30 | │ ├── ssdd
31 | │ │ ├── train
32 | │ │ ├── test
33 | │ │ │ ├── all
34 | │ │ │ ├── inshore
35 | │ │ │ ├── offshore
36 | ```
37 |
38 | ## Change base config
39 |
40 | Please change `data_root` in `configs/_base_/datasets/ssdd.py` to `data/ssdd/`.
41 |
--------------------------------------------------------------------------------
/tools/dist_test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Distributed testing launcher.
# Usage: ./tools/dist_test.sh CONFIG CHECKPOINT GPUS [extra test.py args...]
# Env overrides: NNODES, NODE_RANK, PORT, MASTER_ADDR.

CONFIG=$1
CHECKPOINT=$2
GPUS=$3
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

# Quote "$0" so the script still works when its path contains spaces,
# and quote "${@:4}" so extra arguments keep their original word boundaries.
PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    "$(dirname "$0")"/test.py \
    "$CONFIG" \
    "$CHECKPOINT" \
    --launcher pytorch \
    "${@:4}"
23 |
--------------------------------------------------------------------------------
/tools/dist_train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Distributed training launcher.
# Usage: ./tools/dist_train.sh CONFIG GPUS [extra train.py args...]
# Env overrides: NNODES, NODE_RANK, PORT, MASTER_ADDR.

CONFIG=$1
GPUS=$2
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

# Quote "$0" so the script still works when its path contains spaces,
# and quote "${@:3}" so extra arguments keep their original word boundaries.
PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    "$(dirname "$0")"/train.py \
    "$CONFIG" \
    --launcher pytorch "${@:3}"
20 |
--------------------------------------------------------------------------------
/tools/misc/print_config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import argparse
3 |
4 | from mmengine import Config, DictAction
5 |
6 |
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: Parsed arguments with ``config`` (config file
        path) and optional ``cfg_options`` overrides.
    """
    arg_parser = argparse.ArgumentParser(description='Print the whole config')
    arg_parser.add_argument('config', help='config file path')
    # DictAction turns repeated key=value tokens into a dict of overrides.
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help=('override some settings in the used config, the key-value pair '
              'in xxx=yyy format will be merged into config file. If the '
              'value to '
              'be overwritten is a list, it should be like key="[a,b]" or '
              'key=a,b '
              'It also allows nested list/tuple values, e.g. '
              'key="[(a,b),(c,d)]" '
              'Note that the quotation marks are necessary and that no white '
              'space '
              'is allowed.'))
    return arg_parser.parse_args()
24 |
25 |
def main():
    """Load the config, apply CLI overrides and print the expanded text."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Merge user-supplied key=value overrides before printing.
    overrides = args.cfg_options
    if overrides is not None:
        cfg.merge_from_dict(overrides)
    print('Config:\n' + cfg.pretty_text)


if __name__ == '__main__':
    main()
39 |
--------------------------------------------------------------------------------
/tools/model_converters/publish_model.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import hashlib
import os
import subprocess

import torch
6 |
7 |
def parse_args():
    """Parse the input/output checkpoint paths from the command line.

    Returns:
        argparse.Namespace: Parsed ``in_file`` and ``out_file`` paths.
    """
    cli = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    cli.add_argument('in_file', help='input checkpoint filename')
    cli.add_argument('out_file', help='output checkpoint filename')
    return cli.parse_args()
16 |
17 |
def process_checkpoint(in_file, out_file):
    """Only inference related parameters are retained.

    The optimizer state is dropped, the checkpoint is re-saved in a format
    loadable by older torch versions, and the file is renamed to
    ``<out_file stem>-<sha256 prefix>.pth``.

    Args:
        in_file (str): Filename of input checkpoint.
        out_file (str): Filename of output checkpoint.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # Compare numeric version components, not raw strings: '1.10' is >= '1.6'
    # numerically but not lexicographically, so the old string comparison
    # picked the wrong branch for torch >= 1.10.
    major, minor = (int(v) for v in torch.__version__.split('.')[:2])
    if (major, minor) >= (1, 6):
        # Keep the legacy (non-zipfile) format so older torch can load it.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    # Hash with hashlib instead of shelling out to `sha256sum`, which is not
    # available on every platform.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # Rename synchronously; the previous fire-and-forget `mv` subprocess could
    # race with callers that expect the published file to exist on return.
    os.rename(out_file, final_file)
42 |
43 |
def main():
    """Entry point: publish the checkpoint named on the command line."""
    cli_args = parse_args()
    process_checkpoint(cli_args.in_file, cli_args.out_file)


if __name__ == '__main__':
    main()
52 |
--------------------------------------------------------------------------------
/tools/slurm_test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Slurm testing launcher.
# Usage: ./tools/slurm_test.sh PARTITION JOB_NAME CONFIG CHECKPOINT [extra test.py args...]
# Env overrides: GPUS, GPUS_PER_NODE, CPUS_PER_TASK, SRUN_ARGS.

set -x

PARTITION=$1
JOB_NAME=$2
CONFIG=$3
CHECKPOINT=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
# Store trailing args in an array so quoted arguments keep their boundaries
# (the old scalar assignment PY_ARGS=${@:5} collapsed them into one string).
PY_ARGS=("${@:5}")
SRUN_ARGS=${SRUN_ARGS:-""}

PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/test.py "${CONFIG}" "${CHECKPOINT}" --launcher="slurm" "${PY_ARGS[@]}"
25 |
--------------------------------------------------------------------------------
/tools/slurm_train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Slurm training launcher.
# Usage: ./tools/slurm_train.sh PARTITION JOB_NAME CONFIG WORK_DIR [extra train.py args...]
# Env overrides: GPUS, GPUS_PER_NODE, CPUS_PER_TASK, SRUN_ARGS.

set -x

PARTITION=$1
JOB_NAME=$2
CONFIG=$3
WORK_DIR=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
SRUN_ARGS=${SRUN_ARGS:-""}
# Store trailing args in an array so quoted arguments keep their boundaries
# (the old scalar assignment PY_ARGS=${@:5} collapsed them into one string).
PY_ARGS=("${@:5}")

PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/train.py "${CONFIG}" --work-dir="${WORK_DIR}" --launcher="slurm" "${PY_ARGS[@]}"
25 |
--------------------------------------------------------------------------------