├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── configs ├── _base_ │ ├── datasets │ │ ├── fine_tune_based │ │ │ ├── base_coco.py │ │ │ ├── base_dior-trainval_bs16.py │ │ │ ├── base_dior.py │ │ │ ├── base_dior_1k.py │ │ │ ├── base_dior_bs16.py │ │ │ ├── base_dior_bs16_1k.py │ │ │ ├── base_dior_bs4_1k.py │ │ │ ├── base_dior_bs6_1k.py │ │ │ ├── base_dior_color.py │ │ │ ├── base_dior_vit.py │ │ │ ├── base_dior_vit_bs4.py │ │ │ ├── base_dumpsite.py │ │ │ ├── base_isaid.py │ │ │ ├── base_isaid_1k.py │ │ │ ├── base_isaid_bs16.py │ │ │ ├── base_isaid_color.py │ │ │ ├── base_nwpu.py │ │ │ ├── base_voc.py │ │ │ ├── few_shot_coco.py │ │ │ ├── few_shot_dior-trainval_bs8.py │ │ │ ├── few_shot_dior_1k.py │ │ │ ├── few_shot_dior_bs16.py │ │ │ ├── few_shot_dior_bs8.py │ │ │ ├── few_shot_dior_noneg.py │ │ │ ├── few_shot_dior_simple.py │ │ │ ├── few_shot_isaid.py │ │ │ ├── few_shot_isaid_1k.py │ │ │ ├── few_shot_isaid_min-iou_1k.py │ │ │ ├── few_shot_nwpu.py │ │ │ └── few_shot_voc.py │ │ ├── nway_kshot │ │ │ ├── base_coco.py │ │ │ ├── base_dior.py │ │ │ ├── base_voc.py │ │ │ ├── few_shot_coco.py │ │ │ ├── few_shot_dior.py │ │ │ └── few_shot_voc.py │ │ ├── query_aware │ │ │ ├── base_coco.py │ │ │ ├── base_voc.py │ │ │ ├── few_shot_coco.py │ │ │ └── few_shot_voc.py │ │ └── two_branch │ │ │ ├── base_coco.py │ │ │ ├── base_voc.py │ │ │ ├── few_shot_coco.py │ │ │ └── few_shot_voc.py │ ├── default_runtime.py │ ├── models │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ ├── faster_rcnn_r50_caffe_fpn.py │ │ └── mask_rcnn_r50_fpn.py │ └── schedules │ │ ├── adamw_10k.py │ │ ├── adamw_10k_1e-5.py │ │ ├── adamw_10k_6e-5.py │ │ ├── adamw_160k.py │ │ ├── adamw_18k.py │ │ ├── adamw_1k.py │ │ ├── adamw_20k.py │ │ ├── adamw_20k_2e-5.py │ │ ├── adamw_20k_6e-5.py │ │ ├── adamw_2k.py │ │ ├── adamw_40k.py │ │ ├── adamw_40k_1e-5.py │ │ ├── adamw_40k_6e-5.py │ │ ├── adamw_5k.py │ │ ├── adamw_80k.py │ │ ├── schedule.py │ │ └── sgd_20k.py └── st_fsod │ ├── dior │ ├── split1 │ │ ├── seed0 │ │ │ └── 
st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-split1_seed0_10shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-split1_seed0_20shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-split1_seed0_3shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r101_dior-split1_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r101_40k_dior-split1_base-training.py │ ├── split2 │ │ ├── seed0 │ │ │ └── st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split2_seed0_10shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split2_seed0_20shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split2_seed0_3shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r101_dior-trainval-split2_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r101_40k_dior-trainval-split2_base-training.py │ ├── split3 │ │ ├── seed0 │ │ │ └── st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split3_seed0_10shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split3_seed0_20shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split3_seed0_3shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r101_dior-trainval-split3_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r101_40k_dior-trainval-split3_base-training.py │ ├── split4 │ │ ├── seed0 │ │ │ └── st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split4_seed0_10shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split4_seed0_20shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split4_seed0_3shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r101_dior-trainval-split4_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r101_40k_dior-trainval-split4_base-training.py │ └── split5 │ │ ├── seed0 │ │ └── st-fsod │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split5_seed0_10shot-fine-tuning.py │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split5_seed0_20shot-fine-tuning.py │ │ │ ├── st-fsod_maskrcnn_r101_dior-trainval-split5_seed0_3shot-fine-tuning.py │ │ │ 
└── st-fsod_maskrcnn_r101_dior-trainval-split5_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r101_40k_dior-trainval-split5_base-training.py │ ├── isaid │ ├── split1 │ │ ├── seed0 │ │ │ └── st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r50_80k_isaid-split1_base-training.py │ ├── split2 │ │ ├── seed0 │ │ │ └── st-fsod │ │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split2_seed0_100shot-fine-tuning.py │ │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split2_seed0_10shot-fine-tuning.py │ │ │ │ └── st-fsod_maskrcnn_r50_isaid-split2_seed0_50shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r50_80k_isaid-split2_base-training.py │ └── split3 │ │ ├── seed0 │ │ └── st-fsod │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split3_seed0_100shot-fine-tuning.py │ │ │ ├── st-fsod_maskrcnn_r50_isaid-split3_seed0_10shot-fine-tuning.py │ │ │ └── st-fsod_maskrcnn_r50_isaid-split3_seed0_50shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r50_80k_isaid-split3_base-training.py │ ├── nwpu │ └── split1 │ │ ├── seed0 │ │ └── st-fsod │ │ │ ├── st-fsod_maskrcnn_r50_2k_nwpu-split1_seed0_10shot-fine-tuning.py │ │ │ ├── st-fsod_maskrcnn_r50_2k_nwpu-split1_seed0_20shot-fine-tuning.py │ │ │ ├── st-fsod_maskrcnn_r50_2k_nwpu-split1_seed0_3shot-fine-tuning.py │ │ │ └── st-fsod_maskrcnn_r50_2k_nwpu-split1_seed0_5shot-fine-tuning.py │ │ └── st-fsod_maskrcnn_r50_10k_nwpu-split1_base-training.py │ └── tfa_maskrcnn_r50.py ├── environment.yaml ├── model-index.yml ├── requirements.txt ├── requirements ├── build.txt ├── docs.txt ├── mminstall.txt ├── optional.txt ├── readthedocs.txt ├── runtime.txt └── tests.txt ├── rsifewshot ├── .mim │ ├── configs │ ├── demo │ ├── model-index.yml │ └── tools ├── __init__.py ├── classification │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── 
core │ │ ├── __init__.py │ │ └── evaluation │ │ │ ├── __init__.py │ │ │ └── eval_hooks.py │ ├── datasets │ │ ├── __init__.py │ │ ├── base.py │ │ ├── builder.py │ │ ├── cub.py │ │ ├── dataset_wrappers.py │ │ ├── mini_imagenet.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ └── loading.py │ │ ├── tiered_imagenet.py │ │ └── utils.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── conv4.py │ │ │ ├── resnet12.py │ │ │ ├── utils.py │ │ │ └── wrn.py │ │ ├── classifiers │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── base_finetune.py │ │ │ ├── base_metric.py │ │ │ ├── baseline.py │ │ │ ├── baseline_plus.py │ │ │ ├── maml.py │ │ │ ├── matching_net.py │ │ │ ├── meta_baseline.py │ │ │ ├── neg_margin.py │ │ │ ├── proto_net.py │ │ │ └── relation_net.py │ │ ├── heads │ │ │ ├── __init__.py │ │ │ ├── base_head.py │ │ │ ├── cosine_distance_head.py │ │ │ ├── linear_head.py │ │ │ ├── matching_head.py │ │ │ ├── meta_baseline_head.py │ │ │ ├── neg_margin_head.py │ │ │ ├── prototype_head.py │ │ │ └── relation_head.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── mse_loss.py │ │ │ └── nll_loss.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── maml_module.py │ └── utils │ │ ├── __init__.py │ │ └── meta_test_parallel.py ├── detection │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ ├── assign_result.py │ │ │ │ ├── atss_assigner.py │ │ │ │ ├── base_assigner.py │ │ │ │ ├── center_region_assigner.py │ │ │ │ ├── grid_assigner.py │ │ │ │ ├── hungarian_assigner.py │ │ │ │ ├── mask_hungarian_assigner.py │ │ │ │ ├── max_iou_assigner.py │ │ │ │ ├── point_assigner.py │ │ │ │ ├── region_assigner.py │ │ │ │ ├── sim_ota_assigner.py │ │ │ │ ├── task_aligned_assigner.py │ │ │ │ └── uniform_assigner.py │ │ │ ├── builder.py │ │ │ ├── coder │ │ │ │ ├── 
__init__.py │ │ │ │ ├── base_bbox_coder.py │ │ │ │ ├── bucketing_bbox_coder.py │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ ├── distance_point_bbox_coder.py │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ ├── tblr_bbox_coder.py │ │ │ │ └── yolo_bbox_coder.py │ │ │ ├── demodata.py │ │ │ ├── iou_calculators │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── iou2d_calculator.py │ │ │ ├── match_costs │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── match_cost.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── base_sampler.py │ │ │ │ ├── combined_sampler.py │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ ├── mask_pseudo_sampler.py │ │ │ │ ├── mask_sampling_result.py │ │ │ │ ├── ohem_sampler.py │ │ │ │ ├── pseudo_sampler.py │ │ │ │ ├── random_sampler.py │ │ │ │ ├── sampling_result.py │ │ │ │ └── score_hlr_sampler.py │ │ │ └── transforms.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── eval_hooks.py │ │ │ └── mean_ap.py │ │ ├── hook │ │ │ ├── __init__.py │ │ │ └── wandblogger_hook.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── custom_hook.py │ ├── datasets │ │ ├── __init__.py │ │ ├── base.py │ │ ├── base_eo_fewshot.py │ │ ├── builder.py │ │ ├── coco.py │ │ ├── dataloader_wrappers.py │ │ ├── dataset_wrappers.py │ │ ├── dior.py │ │ ├── dior_caption.py │ │ ├── dumpsite.py │ │ ├── isaid.py │ │ ├── nwpu.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── formatting.py │ │ │ ├── rsi_aug.py │ │ │ └── transforms.py │ │ ├── utils.py │ │ └── voc.py │ ├── models │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ └── resnet_with_meta_conv.py │ │ ├── builder.py │ │ ├── dense_heads │ │ │ ├── __init__.py │ │ │ ├── attention_rpn_head.py │ │ │ ├── neg_rpn_head.py │ │ │ ├── sam_head.py │ │ │ ├── st_rpn_head.py │ │ │ ├── st_rpn_head_v2.py │ │ │ ├── st_rpn_head_v3.py │ │ │ ├── st_rpn_head_v4.py │ │ │ ├── two_branch_rpn_head.py │ │ │ └── ufd_rpn_head.py │ │ ├── 
detectors │ │ │ ├── __init__.py │ │ │ ├── attention_rpn_detector.py │ │ │ ├── fsce.py │ │ │ ├── fsdetview.py │ │ │ ├── meta_rcnn.py │ │ │ ├── mpsr.py │ │ │ ├── neg_rpn.py │ │ │ ├── neg_rpn_meta_rcnn.py │ │ │ ├── neg_rpn_query_support_detector.py │ │ │ ├── query_support_detector.py │ │ │ ├── st_tfa.py │ │ │ ├── st_tfa_v2.py │ │ │ └── tfa.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── supervised_contrastive_loss.py │ │ │ └── token_sigmoid_focal_loss.py │ │ ├── roi_heads │ │ │ ├── __init__.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── contrastive_bbox_head.py │ │ │ │ ├── cosine_sim_bbox_head.py │ │ │ │ ├── cosine_sim_st_bbox_head.py │ │ │ │ ├── cosine_sim_st_bbox_head_v2.py │ │ │ │ ├── meta_bbox_head.py │ │ │ │ ├── multi_relation_bbox_head.py │ │ │ │ └── two_branch_bbox_head.py │ │ │ ├── contrastive_roi_head.py │ │ │ ├── fsdetview_roi_head.py │ │ │ ├── meta_rcnn_roi_head.py │ │ │ ├── multi_relation_roi_head.py │ │ │ ├── neg_rpn_fsdetview_roi_head.py │ │ │ ├── neg_rpn_meta_rcnn_roi_head.py │ │ │ ├── neg_rpn_roi_head.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── meta_rcnn_res_layer.py │ │ │ ├── st_roi_head.py │ │ │ ├── st_roi_head_v2.py │ │ │ ├── st_roi_head_v4.py │ │ │ └── two_branch_roi_head.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── aggregation_layer.py │ └── wandb │ │ ├── __pycache__ │ │ ├── det_wandb_visualizer.cpython-310.pyc │ │ └── det_wandb_visualizer.cpython-37.pyc │ │ └── det_wandb_visualizer.py ├── utils │ ├── __init__.py │ ├── collate.py │ ├── collect_env.py │ ├── compat_config.py │ ├── dist_utils.py │ ├── infinite_sampler.py │ ├── local_seed.py │ ├── logger.py │ └── runner.py └── version.py ├── scripts ├── meta_train_st-tfa_dior-trainval.sh ├── test_st-tfa_dior_split2_seed0.sh ├── test_st-tfa_isaid_split1.sh ├── test_st-tfa_isaid_split1_seed1.sh ├── test_st-tfa_isaid_split1_seed2.sh ├── test_st-tfa_isaid_split1_sensitivity.sh ├── test_st-tfa_isaid_split2.sh ├── test_st-tfa_isaid_split2_seed1.sh ├── 
test_st-tfa_isaid_split2_seed2.sh ├── test_st-tfa_isaid_split3.sh ├── test_st-tfa_isaid_split3_seed1.sh ├── test_st-tfa_isaid_split3_seed2.sh ├── test_st-tfa_no-roi_isaid_split1.sh ├── test_st-tfa_no-rpn_isaid_split1.sh ├── test_tfa_balance_isaid_split1.sh ├── test_tfa_isaid_split1.sh ├── train_base_dior-trainval.sh ├── train_base_dior.sh ├── train_ft_dior_split2.sh ├── train_ft_dior_split3.sh ├── train_ft_dior_split4.sh ├── train_ft_dior_split5.sh ├── train_st-tfa_dior-trainval_split2_seed0.sh ├── train_st-tfa_dior-trainval_split2_seed1.sh ├── train_st-tfa_dior-trainval_split2_seed2.sh ├── train_st-tfa_dior-trainval_split3_seed0.sh ├── train_st-tfa_dior-trainval_split3_seed1.sh ├── train_st-tfa_dior-trainval_split3_seed2.sh ├── train_st-tfa_dior-trainval_split4_seed0.sh ├── train_st-tfa_dior-trainval_split4_seed1.sh ├── train_st-tfa_dior-trainval_split4_seed2.sh ├── train_st-tfa_dior-trainval_split5_seed0.sh ├── train_st-tfa_dior-trainval_split5_seed1.sh ├── train_st-tfa_dior-trainval_split5_seed2.sh ├── train_st-tfa_dior_split1.sh ├── train_st-tfa_dior_split1_seed1.sh ├── train_st-tfa_dior_split1_seed2.sh ├── train_st-tfa_dior_split2.sh ├── train_st-tfa_dior_split2_seed1.sh ├── train_st-tfa_dior_split2_seed2.sh ├── train_st-tfa_dior_split3.sh ├── train_st-tfa_dior_split3_seed1.sh ├── train_st-tfa_dior_split3_seed2.sh ├── train_st-tfa_dior_split4.sh ├── train_st-tfa_dior_split4_seed1.sh ├── train_st-tfa_dior_split4_seed2.sh ├── train_st-tfa_dior_split5.sh ├── train_st-tfa_dior_split5_seed1.sh ├── train_st-tfa_dior_split5_seed2.sh ├── train_st-tfa_isaid_split1.sh ├── train_st-tfa_isaid_split1_no-rpn.sh ├── train_st-tfa_isaid_split1_seed1.sh ├── train_st-tfa_isaid_split1_seed2.sh ├── train_st-tfa_isaid_split1_sensitivity.sh ├── train_st-tfa_isaid_split2.sh ├── train_st-tfa_isaid_split2_seed1.sh ├── train_st-tfa_isaid_split2_seed2.sh ├── train_st-tfa_isaid_split3.sh ├── train_st-tfa_isaid_split3_seed1.sh ├── train_st-tfa_isaid_split3_seed2.sh ├── 
train_st-tfa_nwpu_2k_split1_seed0.sh ├── train_st-tfa_nwpu_2k_split1_seed1.sh ├── train_st-tfa_nwpu_2k_split1_seed2.sh ├── train_st-tfa_nwpu_split1_seed0.sh ├── train_st-tfa_nwpu_split1_seed1.sh ├── train_st-tfa_nwpu_split1_seed2.sh ├── train_st_tfa_dior_split1.sh ├── train_st_tfa_isaid_split1_roi-thre80-bg00_unlabeled.sh ├── train_st_tfa_isaid_split1_roi-thre80_unlabeled.sh ├── train_st_tfa_isaid_split1_sensitivity_roi.sh ├── train_st_tfa_isaid_split1_sensitivity_roi_v2.sh ├── train_st_tfa_isaid_split1_sensitivity_rpn.sh ├── train_st_tfa_isaid_split1_unlabeled.sh ├── train_st_tfa_no-roi_isaid_split1.sh ├── train_st_tfa_no-rpn_isaid_split1.sh ├── train_st_tfa_no-rpn_isaid_split1_v2.sh ├── train_tfa_balance_dior_split1.sh ├── train_tfa_balance_dior_split2.sh ├── train_tfa_balance_isaid_split1.sh ├── train_tfa_dior_split1.sh ├── train_tfa_dior_split2.sh └── train_tfa_isaid_split1.sh ├── setup.cfg ├── setup.py └── tools ├── classification ├── dist_test.sh ├── dist_train.sh ├── slurm_test.sh ├── slurm_train.sh ├── test.py └── train.py ├── data ├── README.md ├── classification │ ├── README.md │ ├── cub │ │ └── README.md │ ├── mini-imagenet │ │ └── README.md │ └── tiered-imagenet │ │ ├── README.md │ │ └── unzip_tiered_imagenet.py └── detection │ ├── README.md │ ├── coco │ └── README.md │ └── voc │ └── README.md ├── detection ├── analysis_tools │ ├── analyze_logs.py │ ├── analyze_results.py │ ├── benchmark.py │ ├── coco_error_analysis.py │ ├── confusion_matrix.py │ ├── eval_metric.py │ ├── get_flops.py │ ├── optimize_anchors.py │ ├── robustness_eval.py │ └── test_robustness.py ├── dist_test.sh ├── dist_train.sh ├── misc │ ├── initialize_bbox_head.py │ └── visualize_saved_dataset.py ├── slurm_test.sh ├── slurm_train.sh ├── test.py └── train.py └── misc └── print_config.py /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements/*.txt 2 | include rsifewshot/VERSION 3 | include rsifewshot/.mim/model-index.yml 4 | 
include rsifewshot/.mim/demo/*/* 5 | recursive-include rsifewshot/.mim/configs *.py *.yml 6 | recursive-include rsifewshot/.mim/tools *.sh *.py 7 | -------------------------------------------------------------------------------- /configs/_base_/datasets/fine_tune_based/base_coco.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | img_norm_cfg = dict( 3 | mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 4 | train_pipeline = [ 5 | dict(type='LoadImageFromFile'), 6 | dict(type='LoadAnnotations', with_bbox=True), 7 | dict( 8 | type='Resize', 9 | img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), 10 | (1333, 768), (1333, 800)], 11 | keep_ratio=True, 12 | multiscale_mode='value'), 13 | dict(type='RandomFlip', flip_ratio=0.5), 14 | dict(type='Normalize', **img_norm_cfg), 15 | dict(type='Pad', size_divisor=32), 16 | dict(type='DefaultFormatBundle'), 17 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) 18 | ] 19 | test_pipeline = [ 20 | dict(type='LoadImageFromFile'), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | img_scale=(1333, 800), 24 | flip=False, 25 | transforms=[ 26 | dict(type='Resize', keep_ratio=True), 27 | dict(type='RandomFlip'), 28 | dict(type='Normalize', **img_norm_cfg), 29 | dict(type='Pad', size_divisor=32), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']) 32 | ]) 33 | ] 34 | # classes splits are predefined in FewShotCocoDataset 35 | data_root = 'data/coco/' 36 | data = dict( 37 | samples_per_gpu=2, 38 | workers_per_gpu=2, 39 | train=dict( 40 | type='FewShotCocoDataset', 41 | save_dataset=False, 42 | ann_cfg=[ 43 | dict( 44 | type='ann_file', 45 | ann_file='data/few_shot_ann/coco/annotations/train.json') 46 | ], 47 | img_prefix=data_root, 48 | pipeline=train_pipeline, 49 | classes='BASE_CLASSES'), 50 | val=dict( 51 | type='FewShotCocoDataset', 52 | ann_cfg=[ 53 | dict( 54 | type='ann_file', 55 | 
ann_file='data/few_shot_ann/coco/annotations/val.json') 56 | ], 57 | img_prefix=data_root, 58 | pipeline=test_pipeline, 59 | classes='BASE_CLASSES'), 60 | test=dict( 61 | type='FewShotCocoDataset', 62 | ann_cfg=[ 63 | dict( 64 | type='ann_file', 65 | ann_file='data/few_shot_ann/coco/annotations/val.json') 66 | ], 67 | img_prefix=data_root, 68 | pipeline=test_pipeline, 69 | test_mode=True, 70 | classes='BASE_CLASSES')) 71 | evaluation = dict(interval=5000, metric='bbox', classwise=True) 72 | -------------------------------------------------------------------------------- /configs/_base_/datasets/fine_tune_based/base_voc.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | img_norm_cfg = dict( 3 | mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 4 | train_pipeline = [ 5 | dict(type='LoadImageFromFile'), 6 | dict(type='LoadAnnotations', with_bbox=True), 7 | dict( 8 | type='Resize', 9 | img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576), 10 | (1333, 608), (1333, 640), (1333, 672), (1333, 704), 11 | (1333, 736), (1333, 768), (1333, 800)], 12 | keep_ratio=True, 13 | multiscale_mode='value'), 14 | dict(type='RandomFlip', flip_ratio=0.5), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size_divisor=32), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) 19 | ] 20 | test_pipeline = [ 21 | dict(type='LoadImageFromFile'), 22 | dict( 23 | type='MultiScaleFlipAug', 24 | img_scale=(1333, 800), 25 | flip=False, 26 | transforms=[ 27 | dict(type='Resize', keep_ratio=True), 28 | dict(type='RandomFlip'), 29 | dict(type='Normalize', **img_norm_cfg), 30 | dict(type='Pad', size_divisor=32), 31 | dict(type='ImageToTensor', keys=['img']), 32 | dict(type='Collect', keys=['img']) 33 | ]) 34 | ] 35 | # classes splits are predefined in FewShotVOCDataset 36 | data_root = 'data/VOCdevkit/' 37 | data = dict( 38 | samples_per_gpu=2, 39 
| workers_per_gpu=2, 40 | train=dict( 41 | type='FewShotVOCDataset', 42 | save_dataset=False, 43 | ann_cfg=[ 44 | dict( 45 | type='ann_file', 46 | ann_file=data_root + 'VOC2007/ImageSets/Main/trainval.txt'), 47 | dict( 48 | type='ann_file', 49 | ann_file=data_root + 'VOC2012/ImageSets/Main/trainval.txt') 50 | ], 51 | img_prefix=data_root, 52 | pipeline=train_pipeline, 53 | classes=None, 54 | use_difficult=True, 55 | instance_wise=False), 56 | val=dict( 57 | type='FewShotVOCDataset', 58 | ann_cfg=[ 59 | dict( 60 | type='ann_file', 61 | ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt') 62 | ], 63 | img_prefix=data_root, 64 | pipeline=test_pipeline, 65 | classes=None, 66 | ), 67 | test=dict( 68 | type='FewShotVOCDataset', 69 | ann_cfg=[ 70 | dict( 71 | type='ann_file', 72 | ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt') 73 | ], 74 | img_prefix=data_root, 75 | pipeline=test_pipeline, 76 | test_mode=True, 77 | classes=None, 78 | )) 79 | evaluation = dict(interval=3000, metric='mAP') 80 | -------------------------------------------------------------------------------- /configs/_base_/datasets/fine_tune_based/few_shot_coco.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | img_norm_cfg = dict( 3 | mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 4 | train_pipeline = [ 5 | dict(type='LoadImageFromFile'), 6 | dict(type='LoadAnnotations', with_bbox=True), 7 | dict( 8 | type='Resize', 9 | img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), 10 | (1333, 768), (1333, 800)], 11 | keep_ratio=True, 12 | multiscale_mode='value'), 13 | dict(type='RandomFlip', flip_ratio=0.5), 14 | dict(type='Normalize', **img_norm_cfg), 15 | dict(type='Pad', size_divisor=32), 16 | dict(type='DefaultFormatBundle'), 17 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) 18 | ] 19 | test_pipeline = [ 20 | dict(type='LoadImageFromFile'), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | 
img_scale=(1333, 800), 24 | flip=False, 25 | transforms=[ 26 | dict(type='Resize', keep_ratio=True), 27 | dict(type='RandomFlip'), 28 | dict(type='Normalize', **img_norm_cfg), 29 | dict(type='Pad', size_divisor=32), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']) 32 | ]) 33 | ] 34 | # classes splits are predefined in FewShotCocoDataset 35 | data_root = 'data/coco/' 36 | data = dict( 37 | samples_per_gpu=2, 38 | workers_per_gpu=2, 39 | train=dict( 40 | save_dataset=True, 41 | type='FewShotCocoDataset', 42 | ann_cfg=[ 43 | dict( 44 | type='ann_file', 45 | ann_file='data/few_shot_ann/coco/annotations/train.json') 46 | ], 47 | img_prefix=data_root, 48 | num_novel_shots=None, 49 | num_base_shots=None, 50 | pipeline=train_pipeline, 51 | classes='ALL_CLASSES', 52 | instance_wise=False), 53 | val=dict( 54 | type='FewShotCocoDataset', 55 | ann_cfg=[ 56 | dict( 57 | type='ann_file', 58 | ann_file='data/few_shot_ann/coco/annotations/val.json') 59 | ], 60 | img_prefix=data_root, 61 | pipeline=test_pipeline, 62 | classes='ALL_CLASSES'), 63 | test=dict( 64 | type='FewShotCocoDataset', 65 | ann_cfg=[ 66 | dict( 67 | type='ann_file', 68 | ann_file='data/few_shot_ann/coco/annotations/val.json') 69 | ], 70 | img_prefix=data_root, 71 | pipeline=test_pipeline, 72 | test_mode=True, 73 | classes='ALL_CLASSES')) 74 | evaluation = dict( 75 | interval=4000, 76 | metric='bbox', 77 | classwise=True, 78 | class_splits=['BASE_CLASSES', 'NOVEL_CLASSES']) 79 | -------------------------------------------------------------------------------- /configs/_base_/datasets/fine_tune_based/few_shot_voc.py: -------------------------------------------------------------------------------- 1 | # dataset settings 2 | img_norm_cfg = dict( 3 | mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 4 | train_pipeline = [ 5 | dict(type='LoadImageFromFile'), 6 | dict(type='LoadAnnotations', with_bbox=True), 7 | dict( 8 | type='Resize', 9 | img_scale=[(1333, 480), 
(1333, 512), (1333, 544), (1333, 576), 10 | (1333, 608), (1333, 640), (1333, 672), (1333, 704), 11 | (1333, 736), (1333, 768), (1333, 800)], 12 | keep_ratio=True, 13 | multiscale_mode='value'), 14 | dict(type='RandomFlip', flip_ratio=0.5), 15 | dict(type='Normalize', **img_norm_cfg), 16 | dict(type='Pad', size_divisor=32), 17 | dict(type='DefaultFormatBundle'), 18 | dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) 19 | ] 20 | # classes splits are predefined in FewShotVOCDataset 21 | # FewShotVOCDefaultDataset predefine ann_cfg for model reproducibility. 22 | test_pipeline = [ 23 | dict(type='LoadImageFromFile'), 24 | dict( 25 | type='MultiScaleFlipAug', 26 | img_scale=(1333, 800), 27 | flip=False, 28 | transforms=[ 29 | dict(type='Resize', keep_ratio=True), 30 | dict(type='RandomFlip'), 31 | dict(type='Normalize', **img_norm_cfg), 32 | dict(type='Pad', size_divisor=32), 33 | dict(type='ImageToTensor', keys=['img']), 34 | dict(type='Collect', keys=['img']) 35 | ]) 36 | ] 37 | # classes splits are predefined in FewShotVOCDataset 38 | data_root = 'data/VOCdevkit/' 39 | data = dict( 40 | samples_per_gpu=2, 41 | workers_per_gpu=2, 42 | train=dict( 43 | type='FewShotVOCDataset', 44 | save_dataset=True, 45 | ann_cfg=[ 46 | dict( 47 | type='ann_file', 48 | ann_file=data_root + 'VOC2007/ImageSets/Main/trainval.txt'), 49 | dict( 50 | type='ann_file', 51 | ann_file=data_root + 'VOC2012/ImageSets/Main/trainval.txt') 52 | ], 53 | img_prefix=data_root, 54 | num_novel_shots=None, 55 | num_base_shots=None, 56 | pipeline=train_pipeline, 57 | classes=None, 58 | use_difficult=False, 59 | instance_wise=False), 60 | val=dict( 61 | type='FewShotVOCDataset', 62 | ann_cfg=[ 63 | dict( 64 | type='ann_file', 65 | ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt') 66 | ], 67 | img_prefix=data_root, 68 | pipeline=test_pipeline, 69 | classes=None), 70 | test=dict( 71 | type='FewShotVOCDataset', 72 | ann_cfg=[ 73 | dict( 74 | type='ann_file', 75 | ann_file=data_root + 
'VOC2007/ImageSets/Main/test.txt') 76 | ], 77 | img_prefix=data_root, 78 | pipeline=test_pipeline, 79 | test_mode=True, 80 | classes=None)) 81 | evaluation = dict(interval=2000, metric='mAP', class_splits=None) 82 | -------------------------------------------------------------------------------- /configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # checkpoint_config = dict(interval=5000) 2 | # yapf:disable 3 | log_config = dict( 4 | interval=50, 5 | hooks=[ 6 | dict(type='TextLoggerHook'), 7 | # dict(type='TensorboardLoggerHook') 8 | ]) 9 | # yapf:enable 10 | custom_hooks = [dict(type='NumClassCheckHook')] 11 | 12 | dist_params = dict(backend='nccl') 13 | log_level = 'INFO' 14 | load_from = None 15 | resume_from = None 16 | workflow = [('train', 1)] 17 | use_infinite_sampler = True 18 | # a magical seed works well in most cases for this repo!!! 19 | # using different seeds might raise some issues about reproducibility 20 | seed = 42 21 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_10k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=500, 22 | warmup_ratio=0.1, 23 | step=[5000, 8000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=10000) 26 | checkpoint_config = dict(interval=1000) 27 | evaluation = dict(interval=1000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_10k_1e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=500, 22 | warmup_ratio=0.1, 23 | step=[5000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=10000) 26 | checkpoint_config = dict(interval=1000) 27 | evaluation = dict(interval=1000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_10k_6e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00006, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=500, 22 | warmup_ratio=0.1, 23 | step=[5000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=10000) 26 | checkpoint_config = dict(interval=1000) 27 | evaluation = dict(interval=1000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=10000, 22 | warmup_ratio=0.1, 23 | step=[100000, 140000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=160000) 26 | checkpoint_config = dict(interval=16000) 27 | evaluation = dict(interval=16000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_18k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[12000, 16000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=18000) 26 | checkpoint_config = dict(interval=1800) 27 | evaluation = dict(interval=1800, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_1k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00006, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=50, 22 | warmup_ratio=0.1, 23 | step=[800]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=1000) 26 | checkpoint_config = dict(interval=100) 27 | evaluation = dict(interval=100, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[15000, 18000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=20000) 26 | checkpoint_config = dict(interval=2000) 27 | evaluation = dict(interval=2000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_20k_2e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00002, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[15000, 18000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=20000) 26 | checkpoint_config = dict(interval=2000) 27 | evaluation = dict(interval=2000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_20k_6e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00006, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[15000, 18000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=20000) 26 | checkpoint_config = dict(interval=2000) 27 | evaluation = dict(interval=2000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_2k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=100, 22 | warmup_ratio=0.1, 23 | step=[1000, 1500]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=2000) 26 | checkpoint_config = dict(interval=200) 27 | evaluation = dict(interval=200, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[24000, 32000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=40000) 26 | checkpoint_config = dict(interval=4000) 27 | evaluation = dict(interval=4000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_40k_1e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[24000, 32000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=40000) 26 | checkpoint_config = dict(interval=4000) 27 | evaluation = dict(interval=4000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_40k_6e-5.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.00006, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=1000, 22 | warmup_ratio=0.1, 23 | step=[24000, 32000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=40000) 26 | checkpoint_config = dict(interval=4000) 27 | evaluation = dict(interval=4000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_5k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=100, 22 | warmup_ratio=0.1, 23 | step=[3000, 4000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=5000) 26 | checkpoint_config = dict(interval=500) 27 | evaluation = dict(interval=500, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/adamw_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict( 3 | type='AdamW', 4 | lr=0.0001, 5 | betas=(0.9, 0.999), 6 | weight_decay=0.05, 7 | paramwise_cfg=dict( 8 | custom_keys={ 9 | 'absolute_pos_embed': dict(decay_mult=0.), 10 | 'relative_position_bias_table': dict(decay_mult=0.), 11 | 'norm': dict(decay_mult=0.) 
12 | } 13 | ) 14 | ) 15 | optimizer_config = dict(grad_clip=None) 16 | 17 | # learning policy 18 | lr_config = dict( 19 | policy='step', 20 | warmup='linear', 21 | warmup_iters=3000, 22 | warmup_ratio=0.1, 23 | step=[40000, 60000]) 24 | 25 | runner = dict(type='IterBasedRunner', max_iters=80000) 26 | checkpoint_config = dict(interval=8000) 27 | evaluation = dict(interval=8000, metric=['mAP']) 28 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[60000, 80000]) 11 | runner = dict(type='IterBasedRunner', max_iters=90000) 12 | -------------------------------------------------------------------------------- /configs/_base_/schedules/sgd_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | 5 | # learning policy 6 | lr_config = dict( 7 | policy='step', 8 | warmup='linear', 9 | warmup_iters=1000, 10 | warmup_ratio=0.1, 11 | step=[15000, 18000]) 12 | 13 | runner = dict(type='IterBasedRunner', max_iters=20000) 14 | checkpoint_config = dict(interval=2000) 15 | evaluation = dict(interval=2000, metric=['mAP']) 16 | -------------------------------------------------------------------------------- /configs/st_fsod/dior/split1/st-fsod_maskrcnn_r101_40k_dior-split1_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_dior_bs16.py', 3 | 
'../../../_base_/schedules/adamw_40k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT1'), 10 | val=dict(classes='BASE_CLASSES_SPLIT1'), 11 | test=dict(classes='BASE_CLASSES_SPLIT1')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=15))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | evaluation = dict( 30 | class_splits=['BASE_CLASSES_SPLIT1'], 31 | ) 32 | -------------------------------------------------------------------------------- /configs/st_fsod/dior/split2/st-fsod_maskrcnn_r101_40k_dior-trainval-split2_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_dior-trainval_bs16.py', 3 | '../../../_base_/schedules/adamw_40k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT2'), 10 | val=dict(classes='BASE_CLASSES_SPLIT2'), 11 | test=dict(classes='BASE_CLASSES_SPLIT2')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101') 21 | ), 22 | roi_head=dict( 23 | 
mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=15))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | -------------------------------------------------------------------------------- /configs/st_fsod/dior/split3/st-fsod_maskrcnn_r101_40k_dior-trainval-split3_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_dior-trainval_bs16.py', 3 | '../../../_base_/schedules/adamw_40k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT3'), 10 | val=dict(classes='BASE_CLASSES_SPLIT3'), 11 | test=dict(classes='BASE_CLASSES_SPLIT3')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=15))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | -------------------------------------------------------------------------------- /configs/st_fsod/dior/split4/st-fsod_maskrcnn_r101_40k_dior-trainval-split4_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_dior-trainval_bs16.py', 3 | '../../../_base_/schedules/adamw_40k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | 
train=dict(classes='BASE_CLASSES_SPLIT4'), 10 | val=dict(classes='BASE_CLASSES_SPLIT4'), 11 | test=dict(classes='BASE_CLASSES_SPLIT4')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=15))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | -------------------------------------------------------------------------------- /configs/st_fsod/dior/split5/st-fsod_maskrcnn_r101_40k_dior-trainval-split5_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_dior-trainval_bs16.py', 3 | '../../../_base_/schedules/adamw_40k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT5'), 10 | val=dict(classes='BASE_CLASSES_SPLIT5'), 11 | test=dict(classes='BASE_CLASSES_SPLIT5')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=15))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | -------------------------------------------------------------------------------- 
/configs/st_fsod/isaid/split1/st-fsod_maskrcnn_r50_80k_isaid-split1_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_isaid_bs16.py', 3 | '../../../_base_/schedules/adamw_80k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT1'), 10 | val=dict(classes='BASE_CLASSES_SPLIT1'), 11 | test=dict(classes='BASE_CLASSES_SPLIT1')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | # depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=11))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | 30 | evaluation = dict(interval=8000, metric=['bbox'], classwise=True) 31 | -------------------------------------------------------------------------------- /configs/st_fsod/isaid/split2/st-fsod_maskrcnn_r50_80k_isaid-split2_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_isaid_bs16.py', 3 | '../../../_base_/schedules/adamw_80k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT2'), 10 | val=dict(classes='BASE_CLASSES_SPLIT2'), 11 | test=dict(classes='BASE_CLASSES_SPLIT2')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # 
model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | # depth=50, 19 | # frozen_stages=[], 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=12))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | 30 | evaluation = dict(interval=8000, metric=['bbox'], classwise=True) 31 | -------------------------------------------------------------------------------- /configs/st_fsod/isaid/split3/st-fsod_maskrcnn_r50_80k_isaid-split3_base-training.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_isaid_bs16.py', 3 | '../../../_base_/schedules/adamw_80k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT3'), 10 | val=dict(classes='BASE_CLASSES_SPLIT3'), 11 | test=dict(classes='BASE_CLASSES_SPLIT3')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | # depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=9))) 26 | 27 | # using regular sampler can get a better base model 28 | use_infinite_sampler = False 29 | 30 | evaluation = dict(interval=8000, metric=['bbox'], classwise=True) 31 | -------------------------------------------------------------------------------- /configs/st_fsod/nwpu/split1/st-fsod_maskrcnn_r50_10k_nwpu-split1_base-training.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../../_base_/datasets/fine_tune_based/base_nwpu.py', 3 | '../../../_base_/schedules/adamw_10k.py', 4 | '../../../_base_/models/mask_rcnn_r50_fpn.py', 5 | '../../../_base_/default_runtime.py' 6 | ] 7 | # classes splits are predefined in FewShotVOCDataset 8 | data = dict( 9 | train=dict(classes='BASE_CLASSES_SPLIT1'), 10 | val=dict(classes='BASE_CLASSES_SPLIT1'), 11 | test=dict(classes='BASE_CLASSES_SPLIT1')) 12 | # lr_config = dict(warmup_iters=100, step=[12000, 16000]) 13 | # runner = dict(max_iters=18000) 14 | # model settings 15 | model = dict( 16 | type='MaskRCNN', 17 | backbone=dict( 18 | # depth=101, 19 | # frozen_stages=1, 20 | init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50') 21 | ), 22 | roi_head=dict( 23 | mask_roi_extractor=None, 24 | mask_head=None, 25 | bbox_head=dict(num_classes=7), 26 | test_cfg=dict(return_proposals=False) 27 | ), 28 | ) 29 | 30 | # using regular sampler can get a better base model 31 | use_infinite_sampler = False 32 | 33 | # evaluation = dict(interval=4000, metric=['bbox'], classwise=True) 34 | -------------------------------------------------------------------------------- /configs/st_fsod/tfa_maskrcnn_r50.py: -------------------------------------------------------------------------------- 1 | _base_ = ['../_base_/models/mask_rcnn_r50_fpn.py'] 2 | model = dict( 3 | type='TFA', 4 | frozen_parameters=[ 5 | 'backbone', 'neck', 'rpn_head', 'roi_head.bbox_head.shared_fcs' 6 | ], 7 | roi_head=dict( 8 | mask_roi_extractor=None, 9 | mask_head=None, 10 | bbox_head=dict( 11 | type='CosineSimBBoxHead', 12 | num_shared_fcs=2, 13 | num_classes=20, 14 | scale=20) 15 | ) 16 | ) 17 | -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: st_fsod 2 | channels: 3 | - defaults 4 | dependencies: 
5 | - _libgcc_mutex=0.1=main 6 | - _openmp_mutex=5.1=1_gnu 7 | - ca-certificates=2024.3.11=h06a4308_0 8 | - certifi=2022.12.7=py37h06a4308_0 9 | - ld_impl_linux-64=2.38=h1181459_1 10 | - libffi=3.4.4=h6a678d5_1 11 | - libgcc-ng=11.2.0=h1234567_1 12 | - libgomp=11.2.0=h1234567_1 13 | - libstdcxx-ng=11.2.0=h1234567_1 14 | - ncurses=6.4=h6a678d5_0 15 | - openssl=1.1.1w=h7f8727e_0 16 | - pip=22.3.1=py37h06a4308_0 17 | - python=3.7.16=h7a1cb2a_0 18 | - readline=8.2=h5eee18b_0 19 | - sqlite=3.45.3=h5eee18b_0 20 | - tk=8.6.14=h39e8969_0 21 | - wheel=0.38.4=py37h06a4308_0 22 | - xz=5.4.6=h5eee18b_1 23 | - zlib=1.2.13=h5eee18b_1 24 | - pip: 25 | - addict==2.4.0 26 | - aliyun-python-sdk-core==2.15.1 27 | - aliyun-python-sdk-kms==2.16.3 28 | - cffi==1.15.1 29 | - charset-normalizer==3.3.2 30 | - click==8.1.7 31 | - colorama==0.4.6 32 | - crcmod==1.7 33 | - cryptography==42.0.7 34 | - cycler==0.11.0 35 | - docker-pycreds==0.4.0 36 | - fonttools==4.38.0 37 | - gitdb==4.0.11 38 | - gitpython==3.1.43 39 | - h5py==3.8.0 40 | - idna==3.7 41 | - importlib-metadata==6.7.0 42 | - jmespath==0.10.0 43 | - kiwisolver==1.4.5 44 | - markdown==3.4.4 45 | - markdown-it-py==2.2.0 46 | - matplotlib==3.5.3 47 | - mdurl==0.1.2 48 | - mmcls==0.25.0 49 | - mmcv-full==1.7.1 50 | - model-index==0.1.11 51 | - numpy==1.21.6 52 | - nvidia-cublas-cu11==11.10.3.66 53 | - nvidia-cuda-nvrtc-cu11==11.7.99 54 | - nvidia-cuda-runtime-cu11==11.7.99 55 | - nvidia-cudnn-cu11==8.5.0.96 56 | - opencv-python==4.9.0.80 57 | - opendatalab==0.0.10 58 | - openmim==0.3.9 59 | - openxlab==0.0.10 60 | - ordered-set==4.1.0 61 | - oss2==2.17.0 62 | - packaging==24.0 63 | - pandas==1.3.5 64 | - pillow==9.5.0 65 | - platformdirs==4.0.0 66 | - portalocker==2.7.0 67 | - protobuf==4.24.4 68 | - psutil==5.9.8 69 | - pycocotools==2.0.7 70 | - pycparser==2.21 71 | - pycryptodome==3.20.0 72 | - pygments==2.17.2 73 | - pyparsing==3.1.2 74 | - python-dateutil==2.9.0.post0 75 | - pytz==2023.4 76 | - pyyaml==6.0.1 77 | - 
requests==2.28.2 78 | - rich==13.7.1 79 | - seaborn==0.12.2 80 | - sentry-sdk==2.2.1 81 | - setproctitle==1.3.3 82 | - setuptools==60.2.0 83 | - six==1.16.0 84 | - smmap==5.0.1 85 | - tabulate==0.9.0 86 | - terminaltables==3.1.10 87 | - tomli==2.0.1 88 | - torch==1.13.0 89 | - torchdata==0.5.0 90 | - torchvision==0.14.0 91 | - tqdm==4.65.2 92 | - typing-extensions==4.7.1 93 | - urllib3==1.26.18 94 | - wandb==0.17.0 95 | - yapf==0.40.1 96 | - zipp==3.15.0 97 | prefix: /data/fahong/Software/Anaconda/envs/earthnet 98 | -------------------------------------------------------------------------------- /model-index.yml: -------------------------------------------------------------------------------- 1 | Import: 2 | - configs/classification/baseline/cub/metafile.yml 3 | - configs/classification/baseline/mini_imagenet/metafile.yml 4 | - configs/classification/baseline/tiered_imagenet/metafile.yml 5 | - configs/classification/baseline_plus/cub/metafile.yml 6 | - configs/classification/baseline_plus/mini_imagenet/metafile.yml 7 | - configs/classification/baseline_plus/tiered_imagenet/metafile.yml 8 | - configs/classification/maml/cub/metafile.yml 9 | - configs/classification/maml/mini_imagenet/metafile.yml 10 | - configs/classification/maml/tiered_imagenet/metafile.yml 11 | - configs/classification/matching_net/cub/metafile.yml 12 | - configs/classification/matching_net/mini_imagenet/metafile.yml 13 | - configs/classification/matching_net/tiered_imagenet/metafile.yml 14 | - configs/classification/meta_baseline/cub/metafile.yml 15 | - configs/classification/meta_baseline/mini_imagenet/metafile.yml 16 | - configs/classification/meta_baseline/tiered_imagenet/metafile.yml 17 | - configs/classification/neg_margin/cub/metafile.yml 18 | - configs/classification/neg_margin/mini_imagenet/metafile.yml 19 | - configs/classification/neg_margin/tiered_imagenet/metafile.yml 20 | - configs/classification/proto_net/cub/metafile.yml 21 | - 
configs/classification/proto_net/mini_imagenet/metafile.yml 22 | - configs/classification/proto_net/tiered_imagenet/metafile.yml 23 | - configs/classification/relation_net/cub/metafile.yml 24 | - configs/classification/relation_net/mini_imagenet/metafile.yml 25 | - configs/classification/relation_net/tiered_imagenet/metafile.yml 26 | - configs/detection/attention_rpn/coco/metafile.yml 27 | - configs/detection/attention_rpn/voc/split1/metafile.yml 28 | - configs/detection/attention_rpn/voc/split2/metafile.yml 29 | - configs/detection/attention_rpn/voc/split3/metafile.yml 30 | - configs/detection/fsce/coco/metafile.yml 31 | - configs/detection/fsce/voc/split1/metafile.yml 32 | - configs/detection/fsce/voc/split2/metafile.yml 33 | - configs/detection/fsce/voc/split3/metafile.yml 34 | - configs/detection/fsdetview/coco/metafile.yml 35 | - configs/detection/fsdetview/voc/split1/metafile.yml 36 | - configs/detection/fsdetview/voc/split2/metafile.yml 37 | - configs/detection/fsdetview/voc/split3/metafile.yml 38 | - configs/detection/meta_rcnn/coco/metafile.yml 39 | - configs/detection/meta_rcnn/voc/split1/metafile.yml 40 | - configs/detection/meta_rcnn/voc/split2/metafile.yml 41 | - configs/detection/meta_rcnn/voc/split3/metafile.yml 42 | - configs/detection/mpsr/coco/metafile.yml 43 | - configs/detection/mpsr/voc/split1/metafile.yml 44 | - configs/detection/mpsr/voc/split2/metafile.yml 45 | - configs/detection/mpsr/voc/split3/metafile.yml 46 | - configs/detection/tfa/coco/metafile.yml 47 | - configs/detection/tfa/voc/split1/metafile.yml 48 | - configs/detection/tfa/voc/split2/metafile.yml 49 | - configs/detection/tfa/voc/split3/metafile.yml 50 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/build.txt 2 | -r requirements/optional.txt 3 | -r requirements/runtime.txt 4 | -r requirements/tests.txt 5 | 
-------------------------------------------------------------------------------- /requirements/build.txt: -------------------------------------------------------------------------------- 1 | # These must be installed before building rsifewshot 2 | cython 3 | numpy 4 | -------------------------------------------------------------------------------- /requirements/docs.txt: -------------------------------------------------------------------------------- 1 | docutils==0.16.0 2 | myst-parser 3 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme 4 | sphinx==4.0.2 5 | sphinx-copybutton 6 | sphinx_markdown_tables 7 | sphinx_rtd_theme==0.5.2 8 | -------------------------------------------------------------------------------- /requirements/mminstall.txt: -------------------------------------------------------------------------------- 1 | mmcls>=0.15.0 2 | mmcv-full>=1.3.8 3 | rsidet>=2.16.0 4 | -------------------------------------------------------------------------------- /requirements/optional.txt: -------------------------------------------------------------------------------- 1 | albumentations>=0.3.2 2 | cityscapesscripts 3 | imagecorruptions 4 | scipy 5 | sklearn 6 | -------------------------------------------------------------------------------- /requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcls 2 | mmcv 3 | rsidet 4 | torch 5 | torchvision 6 | -------------------------------------------------------------------------------- /requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | numpy 3 | pycocotools; platform_system == "Linux" 4 | pycocotools-windows; platform_system == "Windows" 5 | six 6 | terminaltables 7 | typing_extensions 8 | -------------------------------------------------------------------------------- /requirements/tests.txt: 
-------------------------------------------------------------------------------- 1 | asynctest 2 | codecov 3 | flake8 4 | interrogate 5 | isort==4.3.21 6 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future. 7 | kwarray 8 | pytest 9 | ubelt 10 | xdoctest>=0.10.0 11 | yapf 12 | -------------------------------------------------------------------------------- /rsifewshot/.mim/configs: -------------------------------------------------------------------------------- 1 | ../../configs -------------------------------------------------------------------------------- /rsifewshot/.mim/demo: -------------------------------------------------------------------------------- 1 | ../../demo -------------------------------------------------------------------------------- /rsifewshot/.mim/model-index.yml: -------------------------------------------------------------------------------- 1 | ../../model-index.yml -------------------------------------------------------------------------------- /rsifewshot/.mim/tools: -------------------------------------------------------------------------------- 1 | ../../tools -------------------------------------------------------------------------------- /rsifewshot/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import mmcls # noqa: F401, F403 3 | import mmcv 4 | import rsidet # noqa: F401, F403 5 | # from .classification import * # noqa: F401, F403 6 | from .detection import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | from .version import __version__, short_version 9 | 10 | def digit_version(version_str): 11 | digit_version_ = [] 12 | for x in version_str.split('.'): 13 | if x.isdigit(): 14 | digit_version_.append(int(x)) 15 | elif x.find('rc') != -1: 16 | patch_version = x.split('rc') 17 | digit_version_.append(int(patch_version[0]) - 1) 18 | digit_version_.append(int(patch_version[1])) 19 | return digit_version_ 20 | 21 | 22 | mmcv_minimum_version = '1.3.12' 23 | mmcv_maximum_version = '1.7.1' 24 | mmcv_version = digit_version(mmcv.__version__) 25 | 26 | 27 | assert (digit_version(mmcv_minimum_version) <= mmcv_version 28 | <= digit_version(mmcv_maximum_version)), \ 29 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 30 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' 31 | 32 | rsidet_minimum_version = '2.16.0' 33 | rsidet_maximum_version = '2.26.0' 34 | rsidet_version = digit_version(rsidet.__version__) 35 | 36 | 37 | assert (digit_version(rsidet_minimum_version) <= rsidet_version 38 | <= digit_version(rsidet_maximum_version)), \ 39 | f'MMDET=={rsidet.__version__} is used but incompatible. ' \ 40 | f'Please install rsidet>={rsidet_minimum_version},\ 41 | <={rsidet_maximum_version}.' 42 | 43 | mmcls_minimum_version = '0.15.0' 44 | mmcls_maximum_version = '0.25.0' 45 | mmcls_version = digit_version(mmcls.__version__) 46 | 47 | 48 | assert (digit_version(mmcls_minimum_version) <= mmcls_version 49 | <= digit_version(mmcls_maximum_version)), \ 50 | f'MMCLS=={mmcls.__version__} is used but incompatible. ' \ 51 | f'Please install mmcls>={mmcls_minimum_version},\ 52 | <={mmcls_maximum_version}.' 
53 | 54 | __all__ = ['__version__', 'short_version'] 55 | -------------------------------------------------------------------------------- /rsifewshot/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .apis import * # noqa: F401,F403 3 | from .core import * # noqa: F401,F403 4 | from .datasets import * # noqa: F401,F403 5 | from .models import * # noqa: F401,F403 6 | from .utils import * # noqa: F401, F403 7 | -------------------------------------------------------------------------------- /rsifewshot/classification/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .inference import (inference_classifier, init_classifier, 3 | process_support_images, show_result_pyplot) 4 | from .test import (Z_SCORE, multi_gpu_meta_test, single_gpu_meta_test, 5 | test_single_task) 6 | from .train import train_model 7 | 8 | __all__ = [ 9 | 'train_model', 'test_single_task', 'Z_SCORE', 'single_gpu_meta_test', 10 | 'multi_gpu_meta_test', 'init_classifier', 'process_support_images', 11 | 'inference_classifier', 'show_result_pyplot' 12 | ] 13 | -------------------------------------------------------------------------------- /rsifewshot/classification/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .evaluation import * # noqa: F401, F403 3 | -------------------------------------------------------------------------------- /rsifewshot/classification/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .eval_hooks import DistMetaTestEvalHook, MetaTestEvalHook 3 | 4 | __all__ = ['MetaTestEvalHook', 'DistMetaTestEvalHook'] 5 | -------------------------------------------------------------------------------- /rsifewshot/classification/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcls.datasets.builder import DATASETS, PIPELINES 3 | 4 | from .base import BaseFewShotDataset 5 | from .builder import (build_dataloader, build_dataset, 6 | build_meta_test_dataloader) 7 | from .cub import CUBDataset 8 | from .dataset_wrappers import EpisodicDataset, MetaTestDataset 9 | from .mini_imagenet import MiniImageNetDataset 10 | from .pipelines import LoadImageFromBytes 11 | from .tiered_imagenet import TieredImageNetDataset 12 | from .utils import label_wrapper 13 | 14 | __all__ = [ 15 | 'build_dataloader', 'build_dataset', 'DATASETS', 'PIPELINES', 'CUBDataset', 16 | 'LoadImageFromBytes', 'build_meta_test_dataloader', 'MiniImageNetDataset', 17 | 'TieredImageNetDataset', 'label_wrapper', 'BaseFewShotDataset', 18 | 'EpisodicDataset', 'MetaTestDataset' 19 | ] 20 | -------------------------------------------------------------------------------- /rsifewshot/classification/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .loading import LoadImageFromBytes 3 | 4 | __all__ = [ 5 | 'LoadImageFromBytes', 6 | ] 7 | -------------------------------------------------------------------------------- /rsifewshot/classification/datasets/pipelines/loading.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
import os.path as osp
from typing import Dict

import mmcv
import numpy as np
from mmcls.datasets.builder import PIPELINES
from mmcls.datasets.pipelines import LoadImageFromFile


@PIPELINES.register_module()
class LoadImageFromBytes(LoadImageFromFile):
    """Load an image from bytes.

    Behaves like :class:`LoadImageFromFile`, except that when the incoming
    ``results`` dict already carries raw bytes under ``'img_bytes'`` those
    bytes are decoded directly (and popped from the dict), skipping the
    file read.
    """

    def __call__(self, results: Dict) -> Dict:
        # Lazily create the file client on first use.
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)
        if results['img_prefix'] is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        # Prefer pre-fetched bytes; fall back to reading from `filename`.
        if results.get('img_bytes', None) is None:
            img_bytes = self.file_client.get(filename)
        else:
            img_bytes = results.pop('img_bytes')
        img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = results['img_info']['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config: zero mean, unit std, no BGR->RGB
        # swap — downstream pipeline steps see the raw decoded values.
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results
--------------------------------------------------------------------------------
/rsifewshot/classification/datasets/utils.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union

import numpy as np
import torch
from torch import Tensor


def label_wrapper(labels: Union[Tensor, np.ndarray, List],
                  class_ids: List[int]) -> Union[Tensor, np.ndarray, list]:
    """Map input labels into range of 0 to numbers of classes-1.

    It is usually used in the meta testing phase, in which the class ids are
    random sampled and discontinuous.

    Args:
        labels (Tensor | np.ndarray | list): The labels to be wrapped.
        class_ids (list[int]): All class ids of labels.

    Returns:
        (Tensor | np.ndarray | list): Same type as the input labels.
    """
    # Dense remapping: class_ids[i] -> i.
    class_id_map = {class_id: i for i, class_id in enumerate(class_ids)}
    if isinstance(labels, torch.Tensor):
        wrapped_labels = torch.tensor(
            [class_id_map[label.item()] for label in labels])
        # Preserve the input tensor's dtype and device.
        wrapped_labels = wrapped_labels.type_as(labels).to(labels.device)
    elif isinstance(labels, np.ndarray):
        wrapped_labels = np.array([class_id_map[label] for label in labels])
        wrapped_labels = wrapped_labels.astype(labels.dtype)
    elif isinstance(labels, (tuple, list)):
        wrapped_labels = [class_id_map[label] for label in labels]
    else:
        raise TypeError('only support torch.Tensor, np.ndarray and list')
    return wrapped_labels
--------------------------------------------------------------------------------
/rsifewshot/classification/models/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from mmcls.models.builder import *  # noqa: F401,F403

from .backbones import *  # noqa: F401,F403
from .classifiers import *  # noqa: F401,F403
from .heads import *  # noqa: F401,F403
from .losses import *  # noqa: F401,F403
from .utils import *  # noqa: F401,F403

__all__ = []
--------------------------------------------------------------------------------
/rsifewshot/classification/models/backbones/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from mmcls.models.builder import BACKBONES

from .conv4 import Conv4, ConvNet
from .resnet12 import ResNet12
from .wrn import WideResNet, WRN28x10

__all__ = [
    'BACKBONES', 'ResNet12', 'Conv4', 'ConvNet', 'WRN28x10', 'WideResNet'
]
--------------------------------------------------------------------------------
/rsifewshot/classification/models/backbones/utils.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.distributions import Bernoulli

# This part of code is modified from https://github.com/kjunelee/MetaOptNet


class DropBlock(nn.Module):
    """DropBlock regularization: zero out contiguous spatial blocks.

    Args:
        block_size (int): Side length of the square blocks to drop.
    """

    def __init__(self, block_size: int) -> None:
        super().__init__()
        self.block_size = block_size

    def forward(self, x: Tensor, gamma: float) -> Tensor:
        """Randomly zero 2D spatial blocks of ``x`` (training mode only).

        Args:
            x (Tensor): Input of shape (batch, channels, height, width).
            gamma (float): Bernoulli probability of dropping a block at
                each candidate centre position.
        """
        # Randomly zeroes 2D spatial blocks of the input tensor.
        if self.training:
            batch_size, channels, height, width = x.shape
            bernoulli = Bernoulli(gamma)
            # Sample block "seeds" only at positions where a full
            # block_size x block_size window fits inside the feature map.
            mask = bernoulli.sample(
                (batch_size, channels, height - (self.block_size - 1),
                 width - (self.block_size - 1)))
            mask = mask.to(x.device)
            block_mask = self._compute_block_mask(mask)
            # Total element count of the mask (all four dims multiplied).
            countM = block_mask.size()[0] * block_mask.size(
            )[1] * block_mask.size()[2] * block_mask.size()[3]
            count_ones = block_mask.sum()
            # Rescale kept activations so the expected magnitude matches
            # the undropped input.
            # NOTE(review): if every position is dropped, count_ones == 0
            # and this divides by zero — presumably gamma is kept small
            # enough in practice; confirm with callers.
            return block_mask * x * (countM / count_ones)
        else:
            return x

    def _compute_block_mask(self, mask: Tensor) -> Tensor:
        """Expand sampled seed positions into a keep-mask of full blocks.

        Args:
            mask (Tensor): 0/1 seed tensor from :meth:`forward`; a 1 marks
                the top-left corner region of a block to drop.

        Returns:
            Tensor: Mask with 0 inside dropped blocks and 1 elsewhere,
            padded back to the input's spatial size.
        """
        left_padding = int((self.block_size - 1) / 2)
        right_padding = int(self.block_size / 2)

        non_zero_idxes = mask.nonzero()
        nr_blocks = non_zero_idxes.shape[0]

        # All (row, col) offsets inside one block_size x block_size window.
        offsets = torch.stack([
            torch.arange(self.block_size).view(-1, 1).expand(
                self.block_size, self.block_size).reshape(-1),
            torch.arange(self.block_size).repeat(self.block_size),
        ]).t()
        # Prepend zero offsets for the (batch, channel) index columns.
        offsets = torch.cat(
            (torch.zeros(self.block_size**2, 2).long(), offsets.long()), 1)
        offsets = offsets.to(mask.device)

        if nr_blocks > 0:
            # Broadcast every seed index over every in-block offset.
            non_zero_idxes = non_zero_idxes.repeat(self.block_size**2, 1)
            offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)
            offsets = offsets.long()

            block_idxes = non_zero_idxes + offsets
            # Pad so block footprints near edges stay in bounds.
            padded_mask = F.pad(
                mask,
                (left_padding, right_padding, left_padding, right_padding))
            padded_mask[block_idxes[:, 0], block_idxes[:, 1],
                        block_idxes[:, 2], block_idxes[:, 3]] = 1.
        else:
            padded_mask = F.pad(
                mask,
                (left_padding, right_padding, left_padding, right_padding))

        # Invert: 1 = keep, 0 = dropped block.
        block_mask = 1 - padded_mask
        return block_mask
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab.
# All rights reserved.
from mmcls.models.builder import CLASSIFIERS

from .base_finetune import BaseFinetuneClassifier
from .base_metric import BaseMetricClassifier
from .baseline import Baseline
from .baseline_plus import BaselinePlus
from .maml import MAML
from .matching_net import MatchingNet
from .meta_baseline import MetaBaseline
from .neg_margin import NegMargin
from .proto_net import ProtoNet
from .relation_net import RelationNet

__all__ = [
    'CLASSIFIERS', 'BaseFinetuneClassifier', 'BaseMetricClassifier',
    'Baseline', 'BaselinePlus', 'ProtoNet', 'MatchingNet', 'RelationNet',
    'NegMargin', 'MetaBaseline', 'MAML'
]
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/baseline.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_finetune import BaseFinetuneClassifier


@CLASSIFIERS.register_module()
class Baseline(BaseFinetuneClassifier):
    """Implementation of `Baseline`_ (fine-tune based few-shot classifier).

    Args:
        head (dict): Config of classification head for training.
        meta_test_head (dict): Config of classification head for meta
            testing. The `meta_test_head` only will be built and run in
            meta testing.
    """

    def __init__(self,
                 head: Dict = dict(
                     type='LinearHead', num_classes=100, in_channels=1600),
                 meta_test_head: Dict = dict(
                     type='LinearHead', num_classes=5, in_channels=1600),
                 *args,
                 **kwargs) -> None:
        # Everything beyond the two head configs is handled by the base
        # fine-tune classifier.
        super().__init__(
            head=head, meta_test_head=meta_test_head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/baseline_plus.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_finetune import BaseFinetuneClassifier


@CLASSIFIERS.register_module()
class BaselinePlus(BaseFinetuneClassifier):
    """Implementation of `Baseline++`_ (cosine-distance variant of Baseline).

    Args:
        head (dict): Config of classification head for training.
        meta_test_head (dict): Config of classification head for meta
            testing. The `meta_test_head` only will be built and run in
            meta testing.
    """

    def __init__(self,
                 head: Dict = dict(
                     type='CosineDistanceHead',
                     num_classes=100,
                     in_channels=1600),
                 meta_test_head: Dict = dict(
                     type='CosineDistanceHead',
                     num_classes=5,
                     in_channels=1600),
                 *args,
                 **kwargs) -> None:
        super().__init__(
            head=head, meta_test_head=meta_test_head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/matching_net.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_metric import BaseMetricClassifier


@CLASSIFIERS.register_module()
class MatchingNet(BaseMetricClassifier):
    """Implementation of `MatchingNet`_ (metric-based few-shot classifier).

    Args:
        head (dict): Config of the matching head. Default: MatchingHead.
    """

    def __init__(self, head: Dict = dict(type='MatchingHead'), *args,
                 **kwargs) -> None:
        # Keep a private copy of the head config; the dict passed to the
        # base class may be consumed/mutated by the builder.
        self.head_cfg = copy.deepcopy(head)
        super().__init__(head=head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/meta_baseline.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_metric import BaseMetricClassifier


@CLASSIFIERS.register_module()
class MetaBaseline(BaseMetricClassifier):
    """Implementation of `MetaBaseline`_.

    Args:
        head (dict): Config of classification head for training.
    """

    def __init__(self,
                 head: Dict = dict(type='MetaBaselineHead'),
                 *args,
                 **kwargs) -> None:
        super().__init__(head=head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/neg_margin.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_finetune import BaseFinetuneClassifier


@CLASSIFIERS.register_module()
class NegMargin(BaseFinetuneClassifier):
    """Implementation of `NegMargin`_.

    Training uses a negative cosine margin; meta testing switches to a
    zero margin and a lower temperature (see the two head configs below).
    """

    def __init__(self,
                 head: Dict = dict(
                     type='NegMarginHead',
                     metric_type='cosine',
                     num_classes=100,
                     in_channels=1600,
                     margin=-0.02,
                     temperature=30.0),
                 meta_test_head: Dict = dict(
                     type='NegMarginHead',
                     metric_type='cosine',
                     num_classes=5,
                     in_channels=1600,
                     margin=0.0,
                     temperature=5.0),
                 *args,
                 **kwargs) -> None:
        super().__init__(
            head=head, meta_test_head=meta_test_head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/proto_net.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_metric import BaseMetricClassifier


@CLASSIFIERS.register_module()
class ProtoNet(BaseMetricClassifier):
    """Implementation of `ProtoNet`_ (prototype-based metric classifier).

    Args:
        head (dict): Config of the prototype head. Default: PrototypeHead.
    """

    def __init__(self,
                 head: Dict = dict(type='PrototypeHead'),
                 *args,
                 **kwargs) -> None:
        # Retain the original head config for later reference.
        self.head_cfg = copy.deepcopy(head)
        super().__init__(head=head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/classifiers/relation_net.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict

from mmcls.models.builder import CLASSIFIERS

from .base_metric import BaseMetricClassifier


@CLASSIFIERS.register_module()
class RelationNet(BaseMetricClassifier):
    """Implementation of `RelationNet`_.

    Args:
        head (dict): Config of the relation head; ``feature_size`` is the
            spatial size of the backbone feature map fed to the head.
    """

    def __init__(self,
                 head: Dict = dict(
                     type='RelationHead',
                     in_channels=64,
                     feature_size=(19, 19)),
                 *args,
                 **kwargs) -> None:
        # Retain the original head config for later reference.
        self.head_cfg = copy.deepcopy(head)
        super().__init__(head=head, *args, **kwargs)
--------------------------------------------------------------------------------
/rsifewshot/classification/models/heads/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from mmcls.models.builder import HEADS

from .cosine_distance_head import CosineDistanceHead
from .linear_head import LinearHead
from .matching_head import MatchingHead
from .meta_baseline_head import MetaBaselineHead
from .neg_margin_head import NegMarginHead
from .prototype_head import PrototypeHead
from .relation_head import RelationHead

__all__ = [
    'HEADS', 'MetaBaselineHead', 'MatchingHead', 'NegMarginHead', 'LinearHead',
    'CosineDistanceHead', 'PrototypeHead', 'RelationHead'
]
--------------------------------------------------------------------------------
/rsifewshot/classification/models/heads/linear_head.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List

import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import HEADS
from torch import Tensor

from .base_head import BaseFewShotHead


@HEADS.register_module()
class LinearHead(BaseFewShotHead):
    """Classification head for Baseline.

    A single fully-connected layer; re-initialized from scratch before
    each meta-test support phase (see :meth:`before_forward_support`).

    Args:
        num_classes (int): Number of categories.
        in_channels (int): Number of channels in the input feature map.
    """

    def __init__(self, num_classes: int, in_channels: int, *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        assert num_classes > 0, f'num_classes={num_classes} ' \
                                f'must be a positive integer'

        self.num_classes = num_classes
        self.in_channels = in_channels

        self.init_layers()

    def init_layers(self) -> None:
        # (Re)create the classifier weights; called at construction and
        # again before every meta-test support pass.
        self.fc = nn.Linear(self.in_channels, self.num_classes)

    def forward_train(self, x: Tensor, gt_label: Tensor, **kwargs) -> Dict:
        """Forward training data."""
        cls_score = self.fc(x)
        losses = self.loss(cls_score, gt_label)
        return losses

    def forward_support(self, x: Tensor, gt_label: Tensor, **kwargs) -> Dict:
        """Forward support data in meta testing."""
        # Support-set fine-tuning reuses the ordinary training step.
        return self.forward_train(x, gt_label, **kwargs)

    def forward_query(self, x: Tensor, **kwargs) -> List:
        """Forward query data in meta testing."""
        cls_score = self.fc(x)
        pred = F.softmax(cls_score, dim=1)
        # Return per-sample probability vectors as a list of numpy arrays.
        pred = list(pred.detach().cpu().numpy())
        return pred

    def before_forward_support(self) -> None:
        """Used in meta testing.

        This function will be called before model forward support data during
        meta testing.
        """
        # Fresh weights per episode, then switch to train mode.
        self.init_layers()
        self.train()

    def before_forward_query(self) -> None:
        """Used in meta testing.

        This function will be called before model forward query data during
        meta testing.
        """
        self.eval()
--------------------------------------------------------------------------------
/rsifewshot/classification/models/losses/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .mse_loss import MSELoss
from .nll_loss import NLLLoss

__all__ = ['MSELoss', 'NLLLoss']
--------------------------------------------------------------------------------
/rsifewshot/classification/models/losses/mse_loss.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import LOSSES
from mmcls.models.losses.utils import weighted_loss
from torch import Tensor
from typing_extensions import Literal


@weighted_loss
def mse_loss(pred: Tensor, target: Tensor) -> Tensor:
    """Wrapper of mse loss.

    The ``weighted_loss`` decorator adds per-element weighting and the
    ``reduction``/``avg_factor`` handling on top of this element-wise core.
    """
    return F.mse_loss(pred, target, reduction='none')


@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum". Default: 'mean'.
        loss_weight (float): The weight of the loss. Default: 1.0.
    """

    def __init__(self,
                 reduction: Literal['none', 'mean', 'sum'] = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[Union[float, int]] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function of loss.

        Args:
            pred (Tensor): The prediction with shape (N, *), where * means
                any number of additional dimensions.
            target (Tensor): The learning target of the prediction
                with shape (N, *) same as the input.
            weight (Tensor | None): Weight of the loss for each
                prediction. Default: None.
            avg_factor (float | int | None): Average factor that is used to
                average the loss. Default: None.
            reduction_override (str | None): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum". Default: None.

        Returns:
            Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override takes precedence over the configured reduction.
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss = self.loss_weight * mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss
--------------------------------------------------------------------------------
/rsifewshot/classification/models/losses/nll_loss.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import LOSSES
from mmcls.models.losses.utils import weighted_loss
from torch import Tensor
from typing_extensions import Literal


@weighted_loss
def nll_loss(pred: Tensor, target: Tensor) -> Tensor:
    """Wrapper of nll loss.

    Expects ``pred`` to hold log-probabilities (the usual NLL contract);
    element-wise result is reduced by the ``weighted_loss`` decorator.
    """
    return F.nll_loss(pred, target, reduction='none')


@LOSSES.register_module()
class NLLLoss(nn.Module):
    """NLLLoss.

    Args:
        reduction (str): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum". Default: 'mean'.
        loss_weight (float): The weight of the loss. Default: 1.0.
    """

    def __init__(self,
                 reduction: Literal['none', 'mean', 'sum'] = 'mean',
                 loss_weight: float = 1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[Union[float, int]] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function of loss.

        Args:
            pred (Tensor): The prediction with shape (N, C).
            target (Tensor): The learning target of the prediction.
                with shape (N, 1).
            weight (Tensor | None): Weight of the loss for each
                prediction. Default: None.
            avg_factor (float | int | None): Average factor that is used to
                average the loss. Default: None.
            reduction_override (str | None): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum". Default: None.

        Returns:
            Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # Per-call override takes precedence over the configured reduction.
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss = self.loss_weight * nll_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss
--------------------------------------------------------------------------------
/rsifewshot/classification/models/utils/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .maml_module import convert_maml_module

__all__ = ['convert_maml_module']
--------------------------------------------------------------------------------
/rsifewshot/classification/utils/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
2 | from .meta_test_parallel import MetaTestParallel 3 | 4 | __all__ = ['MetaTestParallel'] 5 | -------------------------------------------------------------------------------- /rsifewshot/detection/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .apis import * # noqa: F401,F403 3 | from .core import * # noqa: F401,F403 4 | from .datasets import * # noqa: F401,F403 5 | from .models import * # noqa: F401,F403 6 | -------------------------------------------------------------------------------- /rsifewshot/detection/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .inference import (inference_detector, init_detector, 3 | process_support_images) 4 | from .test import (multi_gpu_model_init, multi_gpu_test, single_gpu_model_init, 5 | single_gpu_test) 6 | from .train import train_detector 7 | 8 | __all__ = [ 9 | 'train_detector', 'single_gpu_model_init', 'multi_gpu_model_init', 10 | 'single_gpu_test', 'multi_gpu_test', 'inference_detector', 'init_detector', 11 | 'process_support_images' 12 | ] 13 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .evaluation import * # noqa: F401, F403 3 | from .utils import * # noqa: F401, F403 4 | from .hook import * # noqa: F401, F403 5 | # from .bbox import * 6 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | # from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, 3 | # MaxIoUAssigner, RegionAssigner) 4 | # from .builder import build_assigner, build_bbox_coder, build_sampler 5 | # from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder, 6 | # PseudoBBoxCoder, TBLRBBoxCoder) 7 | # from .iou_calculators import BboxOverlaps2D, bbox_overlaps 8 | # from .samplers import (BaseSampler, CombinedSampler, 9 | # InstanceBalancedPosSampler, IoUBalancedNegSampler, 10 | # OHEMSampler, PseudoSampler, RandomSampler, 11 | # SamplingResult, ScoreHLRSampler) 12 | # from .transforms import (bbox2distance, bbox2result, bbox2roi, 13 | # bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, 14 | # bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, 15 | # distance2bbox, find_inside_bboxes, roi2bbox) 16 | 17 | from .samplers import RandomSampler as RandomSamplerV2 18 | __all__ = ['RandomSamplerV2'] 19 | 20 | # __all__ = [ 21 | # 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner', 22 | # 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler', 23 | # 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 24 | # 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner', 25 | # 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', 26 | # 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance', 27 | # 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder', 28 | # 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder', 29 | # 'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 30 | # 'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes' 31 | # ] 32 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .mask_hungarian_assigner import MaskHungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner

__all__ = [
    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
    'TaskAlignedAssigner', 'MaskHungarianAssigner'
]
--------------------------------------------------------------------------------
/rsifewshot/detection/core/bbox/assigners/base_assigner.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes to either a ground truth boxes or a negative boxes."""
--------------------------------------------------------------------------------
/rsifewshot/detection/core/bbox/builder.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

BBOX_ASSIGNERS = Registry('bbox_assigner')
# NOTE(review): the sampler registry name carries an '_rsi' suffix,
# presumably to avoid clashing with the upstream detection framework's
# own 'bbox_sampler' registry — confirm before renaming.
BBOX_SAMPLERS = Registry('bbox_sampler_rsi')
BBOX_CODERS = Registry('bbox_coder')


def build_assigner(cfg, **default_args):
    """Builder of box assigner."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)


def build_sampler(cfg, **default_args):
    """Builder of box sampler."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)


def build_bbox_coder(cfg, **default_args):
    """Builder of box coder."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args)
--------------------------------------------------------------------------------
/rsifewshot/detection/core/bbox/coder/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .distance_point_bbox_coder import DistancePointBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder

__all__ = [
    'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
    'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
    'BucketingBBoxCoder', 'DistancePointBBoxCoder'
]
--------------------------------------------------------------------------------
/rsifewshot/detection/core/bbox/coder/base_bbox_coder.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
    """Distance Point BBox coder.

    This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decode it back to the original.

    Args:
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.
    """

    def __init__(self, clip_border=True):
        # Bug fix: the original called ``super(BaseBBoxCoder, self).__init__()``,
        # which starts the MRO search *after* BaseBBoxCoder and therefore skips
        # the direct parent's initializer entirely.  The zero-argument form
        # resolves to ``BaseBBoxCoder.__init__`` as intended.
        super().__init__()
        self.clip_border = clip_border

    def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
        """Encode bounding box to distances.

        Args:
            points (Tensor): Shape (N, 2), The format is [x, y].
            gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
            max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis, instead <=.
                Default 0.1.

        Returns:
            Tensor: Box transformation deltas. The shape is (N, 4).
        """
        assert points.size(0) == gt_bboxes.size(0)
        assert points.size(-1) == 2
        assert gt_bboxes.size(-1) == 4
        return bbox2distance(points, gt_bboxes, max_dis, eps)

    def decode(self, points, pred_bboxes, max_shape=None):
        """Decode distance prediction to bounding box.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2).
            pred_bboxes (Tensor): Distance from the given point to 4
                boundaries (left, top, right, bottom). Shape (B, N, 4)
                or (N, 4)
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]], optional): Maximum bounds for boxes, specifies
                (H, W, C) or (H, W). If priors shape is (B, N, 4), then
                the max_shape should be a Sequence[Sequence[int]],
                and the length of max_shape should also be B.
                Default None.

        Returns:
            Tensor: Boxes with shape (N, 4) or (B, N, 4)
        """
        assert points.size(0) == pred_bboxes.size(0)
        assert points.size(-1) == 2
        assert pred_bboxes.size(-1) == 4
        # ``clip_border=False`` disables clamping regardless of the caller's
        # ``max_shape`` argument.
        if self.clip_border is False:
            max_shape = None
        return distance2bbox(points, pred_bboxes, max_shape)
2 | from ..builder import BBOX_CODERS 3 | from .base_bbox_coder import BaseBBoxCoder 4 | 5 | 6 | @BBOX_CODERS.register_module() 7 | class PseudoBBoxCoder(BaseBBoxCoder): 8 | """Pseudo bounding box coder.""" 9 | 10 | def __init__(self, **kwargs): 11 | super(BaseBBoxCoder, self).__init__(**kwargs) 12 | 13 | def encode(self, bboxes, gt_bboxes): 14 | """torch.Tensor: return the given ``bboxes``""" 15 | return gt_bboxes 16 | 17 | def decode(self, bboxes, pred_bboxes): 18 | """torch.Tensor: return the given ``pred_bboxes``""" 19 | return pred_bboxes 20 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/demodata.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import numpy as np 3 | import torch 4 | 5 | from rsidet.utils.util_random import ensure_rng 6 | 7 | 8 | def random_boxes(num=1, scale=1, rng=None): 9 | """Simple version of ``kwimage.Boxes.random`` 10 | 11 | Returns: 12 | Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
13 | 14 | References: 15 | https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 16 | 17 | Example: 18 | >>> num = 3 19 | >>> scale = 512 20 | >>> rng = 0 21 | >>> boxes = random_boxes(num, scale, rng) 22 | >>> print(boxes) 23 | tensor([[280.9925, 278.9802, 308.6148, 366.1769], 24 | [216.9113, 330.6978, 224.0446, 456.5878], 25 | [405.3632, 196.3221, 493.3953, 270.7942]]) 26 | """ 27 | rng = ensure_rng(rng) 28 | 29 | tlbr = rng.rand(num, 4).astype(np.float32) 30 | 31 | tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) 32 | tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) 33 | br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) 34 | br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) 35 | 36 | tlbr[:, 0] = tl_x * scale 37 | tlbr[:, 1] = tl_y * scale 38 | tlbr[:, 2] = br_x * scale 39 | tlbr[:, 3] = br_y * scale 40 | 41 | boxes = torch.from_numpy(tlbr) 42 | return boxes 43 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .builder import build_iou_calculator 3 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps 4 | 5 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] 6 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmcv.utils import Registry, build_from_cfg 3 | 4 | IOU_CALCULATORS = Registry('IoU calculator') 5 | 6 | 7 | def build_iou_calculator(cfg, default_args=None): 8 | """Builder of IoU calculator.""" 9 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 10 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/match_costs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .builder import build_match_cost 3 | from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, 4 | DiceCost, FocalLossCost, IoUCost) 5 | 6 | __all__ = [ 7 | 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 8 | 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' 9 | ] 10 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/match_costs/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import Registry, build_from_cfg 3 | 4 | MATCH_COST = Registry('Match Cost') 5 | 6 | 7 | def build_match_cost(cfg, default_args=None): 8 | """Builder of IoU calculator.""" 9 | return build_from_cfg(cfg, MATCH_COST, default_args) 10 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base_sampler import BaseSampler 3 | # from .combined_sampler import CombinedSampler 4 | # from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 5 | # from .iou_balanced_neg_sampler import IoUBalancedNegSampler 6 | # from .mask_pseudo_sampler import MaskPseudoSampler 7 | # from .mask_sampling_result import MaskSamplingResult 8 | # from .ohem_sampler import OHEMSampler 9 | # from .pseudo_sampler import PseudoSampler 10 | from .random_sampler import RandomSampler 11 | # from .sampling_result import SamplingResult 12 | # from .score_hlr_sampler import ScoreHLRSampler 13 | 14 | # __all__ = [ 15 | # 'BaseSampler', 'PseudoSampler', 'RandomSampler', 16 | # 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 17 | # 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler', 18 | # 'MaskSamplingResult' 19 | # ] 20 | __all__ = [ 21 | 'BaseSampler', 'RandomSampler', 22 | ] 23 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples equal number of positive samples
    for each instance."""

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive boxes.

        Positives are drawn evenly across ground-truth instances so that no
        single instance dominates the positive set.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of boxes.
            num_expected (int): The number of expected positive samples

        Returns:
            Tensor or ndarray: sampled indices.
        """
        # Indices of proposals matched to some gt (gt_inds is 1-based;
        # 0 marks negatives).
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            # Fewer positives than requested: keep them all.
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            # Per-instance quota; the +1 biases the quota upward so rounding
            # cannot leave the total short (overshoot is trimmed below).
            num_per_gt = int(round(num_expected / float(num_gts)) + 1)
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero(
                    assign_result.gt_inds == i.item(), as_tuple=False)
                if inds.numel() != 0:
                    inds = inds.squeeze(1)
                else:
                    continue
                if len(inds) > num_per_gt:
                    # More candidates than the quota: subsample at random.
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if len(sampled_inds) < num_expected:
                # Top up with extras drawn from the not-yet-sampled positives.
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(
                    list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(
                    assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif len(sampled_inds) > num_expected:
                # Quota rounding overshot: trim back down at random.
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
2 | """copy from 3 | https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" 4 | 5 | import torch 6 | 7 | from rsidet.core.bbox.builder import BBOX_SAMPLERS 8 | from .base_sampler import BaseSampler 9 | from .mask_sampling_result import MaskSamplingResult 10 | 11 | 12 | @BBOX_SAMPLERS.register_module() 13 | class MaskPseudoSampler(BaseSampler): 14 | """A pseudo sampler that does not do sampling actually.""" 15 | 16 | def __init__(self, **kwargs): 17 | pass 18 | 19 | def _sample_pos(self, **kwargs): 20 | """Sample positive samples.""" 21 | raise NotImplementedError 22 | 23 | def _sample_neg(self, **kwargs): 24 | """Sample negative samples.""" 25 | raise NotImplementedError 26 | 27 | def sample(self, assign_result, masks, gt_masks, **kwargs): 28 | """Directly returns the positive and negative indices of samples. 29 | 30 | Args: 31 | assign_result (:obj:`AssignResult`): Assigned results 32 | masks (torch.Tensor): Bounding boxes 33 | gt_masks (torch.Tensor): Ground truth boxes 34 | Returns: 35 | :obj:`SamplingResult`: sampler results 36 | """ 37 | pos_inds = torch.nonzero( 38 | assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() 39 | neg_inds = torch.nonzero( 40 | assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() 41 | gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) 42 | sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, 43 | gt_masks, assign_result, gt_flags) 44 | return sampling_result 45 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/bbox/samplers/mask_sampling_result.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | """copy from 3 | https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" 4 | 5 | import torch 6 | 7 | from .sampling_result import SamplingResult 8 | 9 | 10 | class MaskSamplingResult(SamplingResult): 11 | """Mask sampling result.""" 12 | 13 | def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, 14 | gt_flags): 15 | self.pos_inds = pos_inds 16 | self.neg_inds = neg_inds 17 | self.pos_masks = masks[pos_inds] 18 | self.neg_masks = masks[neg_inds] 19 | self.pos_is_gt = gt_flags[pos_inds] 20 | 21 | self.num_gts = gt_masks.shape[0] 22 | self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 23 | 24 | if gt_masks.numel() == 0: 25 | # hack for index error case 26 | assert self.pos_assigned_gt_inds.numel() == 0 27 | self.pos_gt_masks = torch.empty_like(gt_masks) 28 | else: 29 | self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] 30 | 31 | if assign_result.labels is not None: 32 | self.pos_gt_labels = assign_result.labels[pos_inds] 33 | else: 34 | self.pos_gt_labels = None 35 | 36 | @property 37 | def masks(self): 38 | """torch.Tensor: concatenated positive and negative boxes""" 39 | return torch.cat([self.pos_masks, self.neg_masks]) 40 | 41 | def __nice__(self): 42 | data = self.info.copy() 43 | data['pos_masks'] = data.pop('pos_masks').shape 44 | data['neg_masks'] = data.pop('neg_masks').shape 45 | parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] 46 | body = ' ' + ',\n '.join(parts) 47 | return '{\n' + body + '\n}' 48 | 49 | @property 50 | def info(self): 51 | """Returns a dictionary of info about the object.""" 52 | return { 53 | 'pos_inds': self.pos_inds, 54 | 'neg_inds': self.neg_inds, 55 | 'pos_masks': self.pos_masks, 56 | 'neg_masks': self.neg_masks, 57 | 'pos_is_gt': self.pos_is_gt, 58 | 'num_gts': self.num_gts, 59 | 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, 60 | } 61 | -------------------------------------------------------------------------------- 
/rsifewshot/detection/core/bbox/samplers/pseudo_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from ..builder import BBOX_SAMPLERS 5 | from .base_sampler import BaseSampler 6 | from .sampling_result import SamplingResult 7 | 8 | 9 | @BBOX_SAMPLERS.register_module() 10 | class PseudoSampler(BaseSampler): 11 | """A pseudo sampler that does not do sampling actually.""" 12 | 13 | def __init__(self, **kwargs): 14 | pass 15 | 16 | def _sample_pos(self, **kwargs): 17 | """Sample positive samples.""" 18 | raise NotImplementedError 19 | 20 | def _sample_neg(self, **kwargs): 21 | """Sample negative samples.""" 22 | raise NotImplementedError 23 | 24 | def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): 25 | """Directly returns the positive and negative indices of samples. 26 | 27 | Args: 28 | assign_result (:obj:`AssignResult`): Assigned results 29 | bboxes (torch.Tensor): Bounding boxes 30 | gt_bboxes (torch.Tensor): Ground truth boxes 31 | 32 | Returns: 33 | :obj:`SamplingResult`: sampler results 34 | """ 35 | pos_inds = torch.nonzero( 36 | assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() 37 | neg_inds = torch.nonzero( 38 | assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() 39 | gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) 40 | sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, 41 | assign_result, gt_flags) 42 | return sampling_result 43 | -------------------------------------------------------------------------------- /rsifewshot/detection/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
@HOOKS.register_module()
class ContrastiveLossDecayHook(Hook):
    """Hook for contrast loss weight decay used in FSCE.

    Args:
        decay_steps (list[int] | tuple[int]): Each item in the list is
            the step to decay the loss weight.
        decay_rate (float): Decay rate. Default: 0.5.
    """

    def __init__(self,
                 decay_steps: Sequence[int],
                 decay_rate: float = 0.5) -> None:
        assert isinstance(decay_steps, (list, tuple)), \
            '`decay_steps` should be list or tuple.'
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate

    def before_iter(self, runner: Runner) -> None:
        # runner.iter is zero-based; compare against one-based step counts.
        current_iter = runner.iter + 1
        # Apply the base rate once for every decay boundary already passed.
        decay_rate = 1.0
        for boundary in self.decay_steps:
            if current_iter > boundary:
                decay_rate *= self.decay_rate
        # Unwrap DataParallel/DistributedDataParallel before reaching into
        # the bbox_head.
        if is_module_wrapper(runner.model):
            model = runner.model.module
        else:
            model = runner.model
        model.roi_head.bbox_head.set_decay_rate(decay_rate)
'FewShotNWPUDataset', 'FewShotDumpsiteDataset', 'DUMPSITE_SPLIT' 24 | ] 25 | -------------------------------------------------------------------------------- /rsifewshot/detection/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .formatting import MultiImageCollect, MultiImageFormatBundle 3 | from .transforms import (CropInstance, CropResizeInstance, GenerateMask, 4 | MultiImageNormalize, MultiImagePad, 5 | MultiImageRandomCrop, MultiImageRandomFlip, 6 | ResizeToMultiScale) 7 | # from .rsi_aug import RandomRotate90 8 | 9 | __all__ = [ 10 | 'CropResizeInstance', 'GenerateMask', 'CropInstance', 'ResizeToMultiScale', 11 | 'MultiImageNormalize', 'MultiImageFormatBundle', 'MultiImageCollect', 12 | 'MultiImagePad', 'MultiImageRandomCrop', 'MultiImageRandomFlip', 13 | # 'RandomRotate90' 14 | ] 15 | -------------------------------------------------------------------------------- /rsifewshot/detection/datasets/pipelines/formatting.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from typing import Dict, List 3 | 4 | from mmcv.parallel import DataContainer as DC 5 | from rsidet.datasets.builder import PIPELINES 6 | from rsidet.datasets.pipelines import Collect, DefaultFormatBundle 7 | 8 | 9 | @PIPELINES.register_module() 10 | class MultiImageFormatBundle(DefaultFormatBundle): 11 | 12 | def __call__(self, results_list: List[Dict]) -> List[Dict]: 13 | """Transform and format common fields of each results in 14 | `results_list`. 15 | 16 | Args: 17 | results_list (list[dict]): List of result dict contains the data 18 | to convert. 19 | 20 | Returns: 21 | list[dict]: List of result dict contains the data that is formatted 22 | with default bundle. 
23 | """ 24 | for results in results_list: 25 | super().__call__(results) 26 | return results_list 27 | 28 | 29 | @PIPELINES.register_module() 30 | class MultiImageCollect(Collect): 31 | 32 | def __call__(self, results_list: List[Dict]) -> Dict: 33 | """Collect all keys of each results in `results_list`. 34 | 35 | The keys in `meta_keys` will be converted to :obj:mmcv.DataContainer. 36 | A scale suffix also will be added to each key to specific from which 37 | scale of results. 38 | 39 | Args: 40 | results_list (list[dict]): List of result dict contains the data 41 | to collect. 42 | 43 | Returns: 44 | dict: The result dict contains the following keys 45 | 46 | - `{key}_scale_{i}` for i in 'num_scales' for key in`self.keys` 47 | - `img_metas_scale_{i}` for i in 'num_scales' 48 | """ 49 | data = {} 50 | for i, results in enumerate(results_list): 51 | img_meta = {key: results[key] for key in self.meta_keys} 52 | data[f'img_metas_scale{i}'] = DC(img_meta, cpu_only=True) 53 | for key in self.keys: 54 | data[f'{key}_scale_{i}'] = results[key] 55 | return data 56 | -------------------------------------------------------------------------------- /rsifewshot/detection/datasets/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import json 3 | 4 | import numpy as np 5 | 6 | 7 | class NumpyEncoder(json.JSONEncoder): 8 | """Save numpy array obj to json.""" 9 | 10 | def default(self, obj: object) -> object: 11 | if isinstance(obj, np.ndarray): 12 | return obj.tolist() 13 | return json.JSONEncoder.default(self, obj) 14 | 15 | 16 | def get_copy_dataset_type(dataset_type: str) -> str: 17 | """Return corresponding copy dataset type.""" 18 | if dataset_type in ['FewShotVOCDataset', 'FewShotVOCDefaultDataset']: 19 | copy_dataset_type = 'FewShotVOCCopyDataset' 20 | elif dataset_type in ['FewShotCocoDataset', 'FewShotCocoDefaultDataset']: 21 | copy_dataset_type = 'FewShotCocoCopyDataset' 22 | elif dataset_type in ['FewShotDIORDataset', 'FewShotDIORDefaultDataset']: 23 | copy_dataset_type = 'FewShotDIORCopyDataset' 24 | else: 25 | raise TypeError(f'{dataset_type} ' 26 | f'not support copy data_infos operation.') 27 | 28 | return copy_dataset_type 29 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
@BACKBONES.register_module()
class ResNetWithMetaConv(ResNet):
    """ResNet with `meta_conv` to handle different inputs in metarcnn and
    fsdetview.

    When input with shape (N, 3, H, W) from images, the network will use
    `conv1` as regular ResNet. When input with shape (N, 4, H, W) from (image +
    mask) the network will replace `conv1` with `meta_conv` to handle
    additional channel.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Parallel stem conv for 4-channel (image + mask) input; same geometry
        # as the standard ResNet conv1 (7x7, stride 2, pad 3, no bias) but
        # with 4 input channels instead of 3.
        self.meta_conv = build_conv_layer(
            self.conv_cfg,  # from config of ResNet
            4,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)

    def forward(self, x: Tensor, use_meta_conv: bool = False) -> Tuple[Tensor]:
        """Forward function.

        When input with shape (N, 3, H, W) from images, the network will use
        `conv1` as regular ResNet. When input with shape (N, 4, H, W) from
        (image + mask) the network will replace `conv1` with `meta_conv` to
        handle additional channel.

        Args:
            x (Tensor): Tensor with shape (N, 3, H, W) from images
                or (N, 4, H, W) from (images + masks).
            use_meta_conv (bool): If set True, forward input tensor with
                `meta_conv` which require tensor with shape (N, 4, H, W).
                Otherwise, forward input tensor with `conv1` which require
                tensor with shape (N, 3, H, W). Default: False.

        Returns:
            tuple[Tensor]: Tuple of features, each item with
                shape (N, C, H, W).
        """
        # Only the stem conv differs between the two input modes; norm1/relu/
        # maxpool and the residual stages are shared.
        # NOTE(review): norm1 is shared between conv1 and meta_conv outputs —
        # presumably intended since both produce 64 channels; confirm.
        if use_meta_conv:
            x = self.meta_conv(x)
        else:
            x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        # Collect only the stages listed in out_indices (attribute inherited
        # from the base ResNet).
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
2 | from typing import Optional 3 | 4 | from mmcv.utils import ConfigDict, print_log 5 | from rsidet.models.builder import DETECTORS 6 | 7 | def build_detector(cfg: ConfigDict, logger: Optional[object] = None): 8 | """Build detector.""" 9 | # get the prefix of fixed parameters 10 | frozen_parameters = cfg.pop('frozen_parameters', None) 11 | 12 | model = DETECTORS.build(cfg) 13 | model.init_weights() 14 | # freeze parameters by prefix 15 | if frozen_parameters is not None: 16 | print_log(f'Frozen parameters: {frozen_parameters}', logger) 17 | for name, param in model.named_parameters(): 18 | for frozen_prefix in frozen_parameters: 19 | if frozen_prefix in name: 20 | param.requires_grad = False 21 | if param.requires_grad: 22 | print_log(f'Training parameters: {name}', logger) 23 | return model 24 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/dense_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .attention_rpn_head import AttentionRPNHead 3 | from .two_branch_rpn_head import TwoBranchRPNHead 4 | from .neg_rpn_head import NegRPNHead 5 | from .ufd_rpn_head import UFDRPNHead 6 | from .st_rpn_head import STRPNHead 7 | from .st_rpn_head_v2 import STRPNHeadV2 8 | from .st_rpn_head_v3 import STRPNHeadV3 9 | from .st_rpn_head_v4 import STRPNHeadV4 10 | from .sam_head import SAMHead 11 | 12 | __all__ = ['AttentionRPNHead', 'TwoBranchRPNHead', 'NegRPNHead', 'UFDRPNHead', 'STRPNHead', 13 | 'STRPNHeadV2', 'STRPNHeadV3', 'STRPNHeadV4', 'SAMHead'] 14 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .attention_rpn_detector import AttentionRPNDetector 3 | from .fsce import FSCE 4 | from .fsdetview import FSDetView 5 | from .meta_rcnn import MetaRCNN 6 | from .mpsr import MPSR 7 | from .query_support_detector import QuerySupportDetector 8 | from .tfa import TFA 9 | from .neg_rpn import NegRPNTFA 10 | from .neg_rpn_query_support_detector import NegRPNQuerySupportDetector 11 | from .neg_rpn_meta_rcnn import NegRPNMetaRCNN 12 | from .st_tfa import STTFA 13 | from .st_tfa_v2 import STTFAV2 14 | 15 | __all__ = [ 16 | 'QuerySupportDetector', 'AttentionRPNDetector', 'FSCE', 'FSDetView', 'TFA', 17 | 'MPSR', 'MetaRCNN', 'NegRPNTFA', 'NegRPNQuerySupportDetector', 'NegRPNMetaRCNN', 18 | 'STTFA', 'STTFAV2' 19 | ] 20 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/detectors/fsce.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from rsidet.models.builder import DETECTORS 3 | from rsidet.models.detectors.two_stage import TwoStageDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class FSCE(TwoStageDetector): 8 | """Implementation of `FSCE `_""" 9 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/detectors/fsdetview.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from rsidet.models.builder import DETECTORS 3 | 4 | from .meta_rcnn import MetaRCNN 5 | 6 | 7 | @DETECTORS.register_module() 8 | class FSDetView(MetaRCNN): 9 | """Implementation of `FSDetView `_.""" 10 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/detectors/neg_rpn_meta_rcnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
import copy
from typing import Dict, List, Optional

import torch
from mmcv.runner import auto_fp16
from mmcv.utils import ConfigDict
from rsidet.models.builder import DETECTORS
from torch import Tensor

from .neg_rpn_query_support_detector import NegRPNQuerySupportDetector
from .meta_rcnn import MetaRCNN


@DETECTORS.register_module()
class NegRPNMetaRCNN(NegRPNQuerySupportDetector, MetaRCNN):
    """Meta R-CNN combined with the negative-RPN query-support detector.

    Pure mixin composition: the MRO resolves shared methods to
    ``NegRPNQuerySupportDetector`` first and falls back to ``MetaRCNN``.
    """


# ===== rsifewshot/detection/models/detectors/tfa.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Union

from rsidet.models.builder import DETECTORS
from rsidet.models.detectors.two_stage import TwoStageDetector


@DETECTORS.register_module()
class TFA(TwoStageDetector):
    """Implementation of `TFA <https://arxiv.org/abs/2003.06957>`_.

    Fine-tuning based few-shot detector; all behaviour is inherited
    from ``TwoStageDetector``.
    """
    # NOTE(review): a string-literal-disabled ``train_step`` containing a
    # ``pdb.set_trace()`` debug hook (and its ``import pdb``) was removed
    # here as dead debugging code; the inherited train_step is used.


# ===== rsifewshot/detection/models/losses/__init__.py =====
# Copyright (c) OpenMMLab. All rights reserved.
2 | from .supervised_contrastive_loss import SupervisedContrastiveLoss 3 | from .token_sigmoid_focal_loss import TokenSigmoidFocalLoss 4 | 5 | __all__ = ['SupervisedContrastiveLoss', 'TokenSigmoidFocalLoss'] 6 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .bbox_heads import (ContrastiveBBoxHead, CosineSimBBoxHead, 3 | MultiRelationBBoxHead) 4 | from .contrastive_roi_head import ContrastiveRoIHead 5 | from .fsdetview_roi_head import FSDetViewRoIHead 6 | from .meta_rcnn_roi_head import MetaRCNNRoIHead 7 | from .multi_relation_roi_head import MultiRelationRoIHead 8 | from .shared_heads import MetaRCNNResLayer 9 | from .two_branch_roi_head import TwoBranchRoIHead 10 | from .neg_rpn_roi_head import NegRPNRoIHead 11 | from .neg_rpn_meta_rcnn_roi_head import NegRPNMetaRCNNRoIHead 12 | from .neg_rpn_fsdetview_roi_head import NegRPNFSDetViewRoIHead 13 | from .st_roi_head import STRoIHead 14 | from .st_roi_head_v2 import STRoIHeadV2 15 | from .st_roi_head_v4 import STRoIHeadV4 16 | 17 | __all__ = [ 18 | 'CosineSimBBoxHead', 'ContrastiveBBoxHead', 'MultiRelationBBoxHead', 19 | 'ContrastiveRoIHead', 'MultiRelationRoIHead', 'FSDetViewRoIHead', 20 | 'MetaRCNNRoIHead', 'MetaRCNNResLayer', 'TwoBranchRoIHead', 21 | 'NegRPNRoIHead', 'NegRPNMetaRCNNRoIHead', 'NegRPNFSDetViewRoIHead', 22 | 'STRoIHead', 'STRoIHeadV2', 'STRoIHeadV4' 23 | ] 24 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .contrastive_bbox_head import ContrastiveBBoxHead 3 | from .cosine_sim_bbox_head import CosineSimBBoxHead 4 | from .meta_bbox_head import MetaBBoxHead 5 | from .multi_relation_bbox_head import MultiRelationBBoxHead 6 | from .two_branch_bbox_head import TwoBranchBBoxHead 7 | from .cosine_sim_st_bbox_head import CosineSimSTBBoxHead 8 | from .cosine_sim_st_bbox_head_v2 import CosineSimSTBBoxHeadV2 9 | 10 | __all__ = [ 11 | 'CosineSimBBoxHead', 'ContrastiveBBoxHead', 'MultiRelationBBoxHead', 12 | 'MetaBBoxHead', 'TwoBranchBBoxHead', 'CosineSimSTBBoxHead', 'CosineSimSTBBoxHeadV2' 13 | ] 14 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/fsdetview_roi_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from typing import Dict, Optional 3 | 4 | import torch 5 | from rsidet.models.builder import HEADS 6 | from torch import Tensor 7 | 8 | from .meta_rcnn_roi_head import MetaRCNNRoIHead 9 | 10 | 11 | @HEADS.register_module() 12 | class FSDetViewRoIHead(MetaRCNNRoIHead): 13 | """Roi head for `FSDetView `_. 14 | 15 | Args: 16 | aggregation_layer (dict): Config of `aggregation_layer`. 17 | Default: None. 18 | """ 19 | 20 | def __init__(self, 21 | aggregation_layer: Optional[Dict] = None, 22 | **kwargs) -> None: 23 | super().__init__(aggregation_layer=aggregation_layer, **kwargs) 24 | 25 | def _bbox_forward(self, query_roi_feats: Tensor, 26 | support_roi_feats: Tensor) -> Dict: 27 | """Box head forward function used in both training and testing. 28 | 29 | Args: 30 | query_roi_feats (Tensor): Roi features with shape (N, C). 31 | support_roi_feats (Tensor): Roi features with shape (1, C). 32 | 33 | Returns: 34 | dict: A dictionary of predicted results. 
35 | """ 36 | # feature aggregation 37 | roi_feats = self.aggregation_layer( 38 | query_feat=query_roi_feats.unsqueeze(-1).unsqueeze(-1), 39 | support_feat=support_roi_feats.view(1, -1, 1, 1)) 40 | roi_feats = torch.cat(roi_feats, dim=1) 41 | roi_feats = torch.cat((roi_feats, query_roi_feats), dim=1) 42 | cls_score, bbox_pred = self.bbox_head(roi_feats) 43 | bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) 44 | return bbox_results 45 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/neg_rpn_fsdetview_roi_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from typing import Dict, Optional 3 | 4 | import torch 5 | from rsidet.models.builder import HEADS 6 | from torch import Tensor 7 | 8 | from .meta_rcnn_roi_head import MetaRCNNRoIHead 9 | from .neg_rpn_meta_rcnn_roi_head import NegRPNMetaRCNNRoIHead 10 | 11 | 12 | @HEADS.register_module() 13 | class NegRPNFSDetViewRoIHead(NegRPNMetaRCNNRoIHead): 14 | """Roi head for `FSDetView `_. 15 | 16 | Args: 17 | aggregation_layer (dict): Config of `aggregation_layer`. 18 | Default: None. 19 | """ 20 | 21 | def __init__(self, 22 | aggregation_layer: Optional[Dict] = None, 23 | **kwargs) -> None: 24 | super().__init__(aggregation_layer=aggregation_layer, **kwargs) 25 | 26 | def _bbox_forward(self, query_roi_feats: Tensor, 27 | support_roi_feats: Tensor) -> Dict: 28 | """Box head forward function used in both training and testing. 29 | 30 | Args: 31 | query_roi_feats (Tensor): Roi features with shape (N, C). 32 | support_roi_feats (Tensor): Roi features with shape (1, C). 33 | 34 | Returns: 35 | dict: A dictionary of predicted results. 
36 | """ 37 | # feature aggregation 38 | roi_feats = self.aggregation_layer( 39 | query_feat=query_roi_feats.unsqueeze(-1).unsqueeze(-1), 40 | support_feat=support_roi_feats.view(1, -1, 1, 1)) 41 | roi_feats = torch.cat(roi_feats, dim=1) 42 | roi_feats = torch.cat((roi_feats, query_roi_feats), dim=1) 43 | cls_score, bbox_pred = self.bbox_head(roi_feats) 44 | bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) 45 | return bbox_results 46 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .meta_rcnn_res_layer import MetaRCNNResLayer 3 | 4 | __all__ = ['MetaRCNNResLayer'] 5 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/shared_heads/meta_rcnn_res_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch.nn as nn 3 | from rsidet.models.builder import SHARED_HEADS 4 | from rsidet.models.roi_heads import ResLayer 5 | from torch import Tensor 6 | 7 | 8 | @SHARED_HEADS.register_module() 9 | class MetaRCNNResLayer(ResLayer): 10 | """Shared resLayer for metarcnn and fsdetview. 11 | 12 | It provides different forward logics for query and support images. 13 | """ 14 | 15 | def __init__(self, *args, **kwargs): 16 | super().__init__(*args, **kwargs) 17 | self.max_pool = nn.MaxPool2d(2) 18 | self.sigmoid = nn.Sigmoid() 19 | 20 | def forward(self, x: Tensor) -> Tensor: 21 | """Forward function for query images. 22 | 23 | Args: 24 | x (Tensor): Features from backbone with shape (N, C, H, W). 25 | 26 | Returns: 27 | Tensor: Shape of (N, C). 
28 | """ 29 | res_layer = getattr(self, f'layer{self.stage + 1}') 30 | out = res_layer(x) 31 | out = out.mean(3).mean(2) 32 | return out 33 | 34 | def forward_support(self, x: Tensor) -> Tensor: 35 | """Forward function for support images. 36 | 37 | Args: 38 | x (Tensor): Features from backbone with shape (N, C, H, W). 39 | 40 | Returns: 41 | Tensor: Shape of (N, C). 42 | """ 43 | x = self.max_pool(x) 44 | res_layer = getattr(self, f'layer{self.stage + 1}') 45 | out = res_layer(x) 46 | out = self.sigmoid(out) 47 | out = out.mean(3).mean(2) 48 | return out 49 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/roi_heads/two_branch_roi_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from typing import Dict, List, Tuple 3 | 4 | import torch 5 | from rsidet.models.builder import HEADS 6 | from rsidet.models.roi_heads import StandardRoIHead 7 | from torch import Tensor 8 | 9 | 10 | @HEADS.register_module() 11 | class TwoBranchRoIHead(StandardRoIHead): 12 | """RoI head for `MPSR `_.""" 13 | 14 | def forward_auxiliary_train(self, feats: Tuple[Tensor], 15 | gt_labels: List[Tensor]) -> Dict: 16 | """Forward function and calculate loss for auxiliary data in training. 17 | 18 | Args: 19 | feats (tuple[Tensor]): List of features at multiple scales, each 20 | is a 4D-tensor. 21 | gt_labels (list[Tensor]): List of class indices corresponding 22 | to each features, each is a 4D-tensor. 23 | 24 | Returns: 25 | dict[str, Tensor]: a dictionary of loss components 26 | """ 27 | # bbox head forward and loss 28 | auxiliary_losses = self._bbox_forward_auxiliary_train(feats, gt_labels) 29 | return auxiliary_losses 30 | 31 | def _bbox_forward_auxiliary_train(self, feats: Tuple[Tensor], 32 | gt_labels: List[Tensor]) -> Dict: 33 | """Run forward function and calculate loss for box head in training. 
34 | 35 | Args: 36 | feats (tuple[Tensor]): List of features at multiple scales, each 37 | is a 4D-tensor. 38 | gt_labels (list[Tensor]): List of class indices corresponding 39 | to each features, each is a 4D-tensor. 40 | 41 | Returns: 42 | dict[str, Tensor]: a dictionary of loss components 43 | """ 44 | cls_scores, = self.bbox_head.forward_auxiliary(feats) 45 | cls_score = torch.cat(cls_scores, dim=0) 46 | labels = torch.cat(gt_labels, dim=0) 47 | label_weights = torch.ones_like(labels) 48 | losses = self.bbox_head.auxiliary_loss(cls_score, labels, 49 | label_weights) 50 | 51 | return losses 52 | -------------------------------------------------------------------------------- /rsifewshot/detection/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .aggregation_layer import * # noqa: F401,F403 3 | -------------------------------------------------------------------------------- /rsifewshot/detection/wandb/__pycache__/det_wandb_visualizer.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhu-xlab/ST-FSOD/18ee6e3346589ea72b7cfa24347baaee3727408a/rsifewshot/detection/wandb/__pycache__/det_wandb_visualizer.cpython-310.pyc -------------------------------------------------------------------------------- /rsifewshot/detection/wandb/__pycache__/det_wandb_visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhu-xlab/ST-FSOD/18ee6e3346589ea72b7cfa24347baaee3727408a/rsifewshot/detection/wandb/__pycache__/det_wandb_visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /rsifewshot/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .collate import multi_pipeline_collate_fn 3 | from .compat_config import compat_cfg 4 | from .dist_utils import check_dist_init, sync_random_seed 5 | from .infinite_sampler import (DistributedInfiniteGroupSampler, 6 | DistributedInfiniteSampler, 7 | InfiniteGroupSampler, InfiniteSampler) 8 | from .local_seed import local_numpy_seed 9 | from .logger import get_root_logger 10 | from .runner import InfiniteEpochBasedRunner 11 | 12 | __all__ = [ 13 | 'multi_pipeline_collate_fn', 'local_numpy_seed', 14 | 'InfiniteEpochBasedRunner', 'InfiniteSampler', 'InfiniteGroupSampler', 15 | 'DistributedInfiniteSampler', 'DistributedInfiniteGroupSampler', 16 | 'get_root_logger', 'check_dist_init', 'sync_random_seed', 'compat_cfg' 17 | ] 18 | -------------------------------------------------------------------------------- /rsifewshot/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import collect_env as collect_basic_env 3 | from mmcv.utils import get_git_hash 4 | 5 | import rsifewshot 6 | 7 | 8 | def collect_env(): 9 | env_info = collect_basic_env() 10 | env_info['MMFewShot'] = ( 11 | rsifewshot.__version__ + '+' + get_git_hash(digits=7)) 12 | return env_info 13 | 14 | 15 | if __name__ == '__main__': 16 | for name, val in collect_env().items(): 17 | print(f'{name}: {val}') 18 | -------------------------------------------------------------------------------- /rsifewshot/utils/dist_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import numpy as np 3 | import torch 4 | import torch.distributed as dist 5 | from mmcv.runner import get_dist_info 6 | 7 | 8 | def check_dist_init(): 9 | return dist.is_available() and dist.is_initialized() 10 | 11 | 12 | def sync_random_seed(seed=None, device='cuda'): 13 | """Propagating the seed of rank 0 to all other ranks. 
14 | 15 | Make sure different ranks share the same seed. All workers must call 16 | this function, otherwise it will deadlock. This method is generally used in 17 | `DistributedSampler`, because the seed should be identical across all 18 | processes in the distributed group. 19 | In distributed sampling, different ranks should sample non-overlapped 20 | data in the dataset. Therefore, this function is used to make sure that 21 | each rank shuffles the data indices in the same order based 22 | on the same seed. Then different ranks could use different indices 23 | to select non-overlapped data from the same data list. 24 | Args: 25 | seed (int, Optional): The seed. Default to None. 26 | device (str): The device where the seed will be put on. 27 | Default to 'cuda'. 28 | Returns: 29 | int: Seed to be used. 30 | """ 31 | if seed is None: 32 | seed = np.random.randint(2**31) 33 | assert isinstance(seed, int) 34 | 35 | rank, world_size = get_dist_info() 36 | 37 | if world_size == 1: 38 | return seed 39 | 40 | if rank == 0: 41 | random_num = torch.tensor(seed, dtype=torch.int32, device=device) 42 | else: 43 | random_num = torch.tensor(0, dtype=torch.int32, device=device) 44 | dist.broadcast(random_num, src=0) 45 | return random_num.item() 46 | -------------------------------------------------------------------------------- /rsifewshot/utils/local_seed.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from contextlib import contextmanager 3 | from typing import Optional 4 | 5 | import numpy as np 6 | 7 | 8 | @contextmanager 9 | def local_numpy_seed(seed: Optional[int] = None) -> None: 10 | """Run numpy codes with a local random seed. 11 | 12 | If seed is None, the default random state will be used. 
13 | """ 14 | state = np.random.get_state() 15 | if seed is not None: 16 | np.random.seed(seed) 17 | try: 18 | yield 19 | finally: 20 | np.random.set_state(state) 21 | -------------------------------------------------------------------------------- /rsifewshot/utils/logger.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import logging 3 | 4 | from mmcv.utils import get_logger 5 | 6 | 7 | def get_root_logger(log_file=None, log_level=logging.INFO): 8 | return get_logger('rsifewshot', log_file, log_level) 9 | -------------------------------------------------------------------------------- /rsifewshot/utils/runner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import time 3 | 4 | from mmcv.runner import EpochBasedRunner 5 | from mmcv.runner.builder import RUNNERS 6 | from torch.utils.data import DataLoader 7 | 8 | 9 | @RUNNERS.register_module() 10 | class InfiniteEpochBasedRunner(EpochBasedRunner): 11 | """Epoch-based Runner supports dataloader with InfiniteSampler. 12 | 13 | The workers of dataloader will re-initialize, when the iterator of 14 | dataloader is created. InfiniteSampler is designed to avoid these time 15 | consuming operations, since the iterator with InfiniteSampler will never 16 | reach the end. 17 | """ 18 | 19 | def train(self, data_loader: DataLoader, **kwargs) -> None: 20 | self.model.train() 21 | self.mode = 'train' 22 | self.data_loader = data_loader 23 | self._max_iters = self._max_epochs * len(self.data_loader) 24 | self.call_hook('before_train_epoch') 25 | time.sleep(2) # Prevent possible deadlock during epoch transition 26 | 27 | # To reuse the iterator, we only create iterator once and bind it 28 | # with runner. 
In the next epoch, the iterator will be used against 29 | if not hasattr(self, 'data_loader_iter'): 30 | self.data_loader_iter = iter(self.data_loader) 31 | 32 | # The InfiniteSampler will never reach the end, but we set the 33 | # length of InfiniteSampler to the actual length of dataset. 34 | # The length of dataloader is determined by the length of sampler, 35 | # when the sampler is not None. Therefore, we can simply forward the 36 | # whole dataset in a epoch by length of dataloader. 37 | 38 | for i in range(len(self.data_loader)): 39 | data_batch = next(self.data_loader_iter) 40 | self._inner_iter = i 41 | self.call_hook('before_train_iter') 42 | self.run_iter(data_batch, train_mode=True, **kwargs) 43 | self.call_hook('after_train_iter') 44 | self._iter += 1 45 | 46 | self.call_hook('after_train_epoch') 47 | self._epoch += 1 48 | -------------------------------------------------------------------------------- /rsifewshot/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
__version__ = '0.1.0'
short_version = __version__


def parse_version_info(version_str):
    """Split a dotted version string into a tuple of ints and rc tags.

    Purely numeric components become ints; a component such as '0rc1'
    becomes the int part followed by the string 'rc1'.  Components that
    are neither numeric nor contain 'rc' are silently skipped.

    >>> parse_version_info('0.1.0')
    (0, 1, 0)
    """
    info = []
    for part in version_str.split('.'):
        if part.isdigit():
            info.append(int(part))
        elif 'rc' in part:
            pieces = part.split('rc')
            info.append(int(pieces[0]))
            info.append(f'rc{pieces[1]}')
    return tuple(info)


version_info = parse_version_info(__version__)

# ===== scripts/meta_train_st-tfa_dior-trainval.sh (shell, reproduced as comments) =====
# # bash scripts/train_st-tfa_dior-trainval_split3_seed0.sh
# # bash scripts/train_st-tfa_dior-trainval_split4_seed0.sh
# bash scripts/train_st-tfa_dior-trainval_split5_seed0.sh
#
# bash scripts/train_st-tfa_dior-trainval_split2_seed1.sh
# bash scripts/train_st-tfa_dior-trainval_split2_seed2.sh
# bash scripts/train_st-tfa_dior-trainval_split3_seed1.sh
# bash scripts/train_st-tfa_dior-trainval_split3_seed2.sh
# bash scripts/train_st-tfa_dior-trainval_split4_seed1.sh
# bash scripts/train_st-tfa_dior-trainval_split4_seed2.sh
# bash scripts/train_st-tfa_dior-trainval_split5_seed1.sh
# bash scripts/train_st-tfa_dior-trainval_split5_seed2.sh
# ===== scripts/test_st-tfa_dior_split2_seed0.sh (binary stand-in URL) =====
# https://raw.githubusercontent.com/zhu-xlab/ST-FSOD/18ee6e3346589ea72b7cfa24347baaee3727408a/scripts/test_st-tfa_dior_split2_seed0.sh
# ===== scripts/test_st-tfa_isaid_split1.sh (shell) =====
# python tools/detection/test.py
configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning/iter_1000.pth --eval='bbox' 2 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning/iter_3000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning/iter_5000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split1_seed1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed1_10shot-fine-tuning/iter_2000.pth --eval='bbox' 2 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed1_50shot-fine-tuning/iter_3000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed1_100shot-fine-tuning/iter_5000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split1_seed2.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_10shot-fine-tuning.py 
work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed2_10shot-fine-tuning/iter_1000.pth --eval='bbox' 2 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed2_50shot-fine-tuning/iter_10000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split1_seed2_100shot-fine-tuning/iter_6000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed0_10shot-fine-tuning/iter_4000.pth --eval='bbox' 2 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed0_50shot-fine-tuning/iter_7000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed0_100shot-fine-tuning/iter_6000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split2_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed1_10shot-fine-tuning/iter_4000.pth --eval='bbox' 2 | python 
tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed1_50shot-fine-tuning/iter_7000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed1_100shot-fine-tuning/iter_8000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split2_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed2_10shot-fine-tuning/iter_2000.pth --eval='bbox' 2 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed2_50shot-fine-tuning/iter_8000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split2_seed2_100shot-fine-tuning/iter_8000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split3.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed0_10shot-fine-tuning/iter_3000.pth --eval='bbox' 2 | python tools/detection/test.py 
configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed0_50shot-fine-tuning/iter_9000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed0_100shot-fine-tuning/iter_7000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split3_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed1_10shot-fine-tuning/iter_4000.pth --eval='bbox' 2 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_50shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed1_50shot-fine-tuning/iter_9000.pth --eval='bbox' 3 | # python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed1_100shot-fine-tuning/iter_7000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_isaid_split3_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_10shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed2_10shot-fine-tuning/iter_3000.pth --eval='bbox' 2 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_50shot-fine-tuning.py 
work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed2_50shot-fine-tuning/iter_9000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_100shot-fine-tuning.py work_dirs/st-tfa_maskrcnn_r50_isaid-split3_seed2_100shot-fine-tuning/iter_9000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_no-roi_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py work_dirs/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning/iter_3000.pth --eval='bbox' 2 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py work_dirs/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning/iter_5000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py work_dirs/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning/iter_5000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_st-tfa_no-rpn_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py work_dirs/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning/iter_1000.pth --eval='bbox' 2 | python tools/detection/test.py 
configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py work_dirs/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning/iter_3000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py work_dirs/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning/iter_5000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_tfa_balance_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/test.py configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning.py work_dirs/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning/iter_3000.pth --eval='bbox' 2 | # python tools/detection/test.py configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning.py work_dirs/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning/iter_5000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_100shot-fine-tuning.py work_dirs/tfa_balance_maskrcnn_r50_40k_isaid-split1_seed0_100shot-fine-tuning/iter_7000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/test_tfa_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/test.py configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning.py work_dirs/tfa_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning/iter_4000.pth --eval='bbox' 2 | python tools/detection/test.py 
configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning.py work_dirs/tfa_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning/iter_6000.pth --eval='bbox' 3 | python tools/detection/test.py configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_100shot-fine-tuning.py work_dirs/tfa_maskrcnn_r50_80k_isaid-split1_seed0_100shot-fine-tuning/iter_8000.pth --eval='bbox' 4 | -------------------------------------------------------------------------------- /scripts/train_base_dior-trainval.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/st-tfa_maskrcnn_r101_40k_dior-trainval-split2_base-training.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/st-tfa_maskrcnn_r101_40k_dior-trainval-split3_base-training.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/st-tfa_maskrcnn_r101_40k_dior-trainval-split4_base-training.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/st-tfa_maskrcnn_r101_40k_dior-trainval-split5_base-training.py 5 | -------------------------------------------------------------------------------- /scripts/train_base_dior.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_rpn/dior/split3/st_rpn_maskrcnn_r101_40k_dior-split3_base-training.py 2 | # python tools/detection/train.py configs/detection/st_rpn/dior/split4/st_rpn_maskrcnn_r101_40k_dior-split4_base-training.py 3 | python tools/detection/train.py configs/detection/st_rpn/dior/split5/st_rpn_maskrcnn_r101_40k_dior-split5_base-training.py 4 | -------------------------------------------------------------------------------- /scripts/train_ft_dior_split2.sh: -------------------------------------------------------------------------------- 1 | python 
tools/detection/train.py configs/detection/st_rpn/dior/split2/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split2_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_rpn/dior/split2/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split2_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_rpn/dior/split2/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split2_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_rpn/dior/split2/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split2_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_ft_dior_split3.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_rpn/dior/split3/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split3_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_rpn/dior/split3/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split3_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_rpn/dior/split3/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split3_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_rpn/dior/split3/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split3_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_ft_dior_split4.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_rpn/dior/split4/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split4_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_rpn/dior/split4/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split4_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py 
configs/detection/st_rpn/dior/split4/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split4_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_rpn/dior/split4/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split4_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_ft_dior_split5.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_rpn/dior/split5/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split5_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_rpn/dior/split5/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split5_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_rpn/dior/split5/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split5_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_rpn/dior/split5/seed0/st_rpn_v6_tfa_maskrcnn_r101_40k_dior-split5_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split2_seed0.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- 
/scripts/train_st-tfa_dior-trainval_split2_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split2_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split2_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split3_seed0.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed0_3shot-fine-tuning.py 2 | # python 
tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed0_5shot-fine-tuning.py 3 | # python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split3_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split3_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed2_10shot-fine-tuning.py 4 | python 
tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split3_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split4_seed0.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split4_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split4_seed2.sh: 
-------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split4_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split5_seed0.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split5_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py 
configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior-trainval_split5_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-trainval-split5_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py 
configs/detection/st_tfa/dior/split1/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split1_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split1_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split1_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py 
configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split2_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split2_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed2_10shot-fine-tuning.py 4 | 
python tools/detection/train.py configs/detection/st_tfa/dior/split2/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split2_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split3.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split3_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split3_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py 
configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split3/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split3_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split4.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split4_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed1_10shot-fine-tuning.py 4 | python 
tools/detection/train.py configs/detection/st_tfa/dior/split4/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split4_seed2.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split4/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split4_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split5.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed0/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split5_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py 
configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed1/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_dior_split5_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/dior/split5/seed2/st-tfa/st-tfa_maskrcnn_r101_dior-split5_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 
4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split1_no-rpn.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_no-rpn/st-tfa_no-rpn_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split1_seed1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_50shot-fine-tuning.py 3 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed1_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split1_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_50shot-fine-tuning.py 3 | python tools/detection/train.py 
configs/detection/st_tfa/isaid/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split1_seed2_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split1_sensitivity.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre60_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre60_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre60_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre90_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 5 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre90_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 6 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_roi-thre90_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 7 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre60_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 8 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre60_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 9 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre60_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 10 | python tools/detection/train.py 
configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre90_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 11 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre90_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 12 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_rpn-thre90_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 13 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_alpha99_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 14 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_alpha99_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 15 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st-tfa_sensitivity/st-tfa_alpha99_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 16 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split2_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_10shot-fine-tuning.py 2 
| python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed1_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split2_seed2.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_10shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split2/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split2_seed2_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split3.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed0/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split3_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_10shot-fine-tuning.py 2 | python tools/detection/train.py 
configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed1/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed1_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_isaid_split3_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split3/seed2/st-tfa/st-tfa_maskrcnn_r50_isaid-split3_seed2_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_2k_split1_seed0.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_2k_split1_seed1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py 
configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_2k_split1_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_2k_nwpu-split1_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_split1_seed0.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed0/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed0_20shot-fine-tuning.py 3 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_split1_seed1.sh: 
-------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed1_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed1_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed1_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed1/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed1_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st-tfa_nwpu_split1_seed2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed2_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed2_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed2_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/nwpu/split1/seed2/st-tfa/st-tfa_maskrcnn_r50_nwpu-split1_seed2_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_dior_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/dior/split1/seed0/st_rpn_v8_st-thre08_tfa_maskrcnn_r101_40k_dior-split1_seed0_3shot-fine-tuning.py 2 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_roi-thre80-bg00_unlabeled.sh: 
-------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80-bg00_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80-bg00_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80-bg00_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_roi-thre80_unlabeled.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_roi-thre80_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_sensitivity_roi.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | # python tools/detection/train.py 
configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 5 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 6 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 7 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_sensitivity_roi_v2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08-bg00_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08-bg00_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_roi-thre08-bg00_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_sensitivity_rpn.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py 
configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre06_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 3 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre06_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 4 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 5 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre06_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 6 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_sensitivity/st_rpn_v6_rpn-thre09_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 7 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_isaid_split1_unlabeled.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_unlabeled/st_rpn_v6_unlabeled_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_no-roi_isaid_split1.sh: 
-------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-roi/st_rpn_v6_no-roi_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_no-rpn_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_st_tfa_no-rpn_isaid_split1_v2.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_roi-thre80_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_roi-thre80_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_50shot-fine-tuning.py 3 | python tools/detection/train.py 
configs/detection/st_tfa/isaid/split1/seed0/st_rpn_v6_no-rpn/st_rpn_v6_roi-thre80_no-rpn_tfa_maskrcnn_r50_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_tfa_balance_dior_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_balance_maskrcnn_r101_40k_dior-split1_seed0_3shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_balance_maskrcnn_r101_40k_dior-split1_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_balance_maskrcnn_r101_40k_dior-split1_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_balance_maskrcnn_r101_40k_dior-split1_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_tfa_balance_dior_split2.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_balance_maskrcnn_r101_40k_dior-split2_seed0_3shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_balance_maskrcnn_r101_40k_dior-split2_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_balance_maskrcnn_r101_40k_dior-split2_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_balance_maskrcnn_r101_40k_dior-split2_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_tfa_balance_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | python tools/detection/train.py 
configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning.py 3 | # python tools/detection/train.py configs/detection/tfa/isaid/split1/seed0/tfa_balance_maskrcnn_r50_80k_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /scripts/train_tfa_dior_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_maskrcnn_r101_40k_dior-split1_seed0_3shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_maskrcnn_r101_40k_dior-split1_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_maskrcnn_r101_40k_dior-split1_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/tfa/dior/split1/seed0/tfa_maskrcnn_r101_40k_dior-split1_seed0_20shot-fine-tuning.py 5 | -------------------------------------------------------------------------------- /scripts/train_tfa_dior_split2.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_maskrcnn_r101_40k_dior-split2_seed0_3shot-fine-tuning.py 2 | # python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_maskrcnn_r101_40k_dior-split2_seed0_5shot-fine-tuning.py 3 | python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_maskrcnn_r101_40k_dior-split2_seed0_10shot-fine-tuning.py 4 | python tools/detection/train.py configs/detection/tfa/dior/split2/seed0/tfa_maskrcnn_r101_40k_dior-split2_seed0_20shot-fine-tuning.py 5 | 
-------------------------------------------------------------------------------- /scripts/train_tfa_isaid_split1.sh: -------------------------------------------------------------------------------- 1 | # python tools/detection/train.py configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_10shot-fine-tuning.py 2 | python tools/detection/train.py configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_50shot-fine-tuning.py 3 | # python tools/detection/train.py configs/detection/tfa/isaid/split1/seed0/tfa_maskrcnn_r50_80k_isaid-split1_seed0_100shot-fine-tuning.py 4 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length = 79 3 | multi_line_output = 0 4 | extra_standard_library = setuptools 5 | known_first_party = rsifewshot 6 | known_third_party = cv2,mmcls,mmcv,mmdet,numpy,pytest,pytorch_sphinx_theme,terminaltables,torch,typing_extensions 7 | 8 | no_lines_before = STDLIB,LOCALFOLDER 9 | default_section = THIRDPARTY 10 | 11 | [yapf] 12 | BASED_ON_STYLE = pep8 13 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 14 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true 15 | -------------------------------------------------------------------------------- /tools/classification/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | NNODES=${NNODES:-1} 7 | NODE_RANK=${NODE_RANK:-0} 8 | PORT=${PORT:-29500} 9 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 10 | 11 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 12 | python -m torch.distributed.launch \ 13 | --nnodes=$NNODES \ 14 | --node_rank=$NODE_RANK \ 15 | --master_addr=$MASTER_ADDR \ 16 | --nproc_per_node=$GPUS \ 17 | --master_port=$PORT \ 18 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 19 | 
-------------------------------------------------------------------------------- /tools/classification/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | NNODES=${NNODES:-1} 6 | NODE_RANK=${NODE_RANK:-0} 7 | PORT=${PORT:-29500} 8 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 9 | 10 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 11 | python -m torch.distributed.launch \ 12 | --nnodes=$NNODES \ 13 | --node_rank=$NODE_RANK \ 14 | --master_addr=$MASTER_ADDR \ 15 | --nproc_per_node=$GPUS \ 16 | --master_port=$PORT \ 17 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 18 | -------------------------------------------------------------------------------- /tools/classification/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/classification/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /tools/classification/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | 
PY_ARGS=${@:5} 14 | 15 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/classification/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /tools/data/README.md: -------------------------------------------------------------------------------- 1 | # Data Preparation for MMFewShot 2 | 3 | It is recommended to symlink the dataset root to `$MMFEWSHOT/data`. 4 | If your folder structure is different, you may need to change the corresponding paths in config files. 5 | 6 | # Few Shot Classification 7 | 8 | Datasets supported in MMFewShot: 9 | 10 | - [CUB Dataset](classification/cub/README.md) \[ [Homepage](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) \] 11 | - [Mini ImageNet Dataset](classification/mini-imagenet/README.md) \[ [Homepage](https://image-net.org/challenges/LSVRC/2012/) \] \[ [Split](https://github.com/twitter/meta-learning-lstm/tree/master/data/miniImagenet) \] 12 | - [Tiered ImageNet Dataset](classification/tiered-imagenet/README.md) \[ [Homepage](https://image-net.org/challenges/LSVRC/2012/) \] \[ [Split](https://github.com/renmengye/few-shot-ssl-public#tieredimagenet) \] 13 | 14 | # Few Shot Detection 15 | 16 | Datasets supported in MMFewShot: 17 | 18 | - [COCO Dataset](detection/coco/README.md) \[ [Homepage](https://cocodataset.org/#home) \] \[ [Split](http://dl.yf.io/fs-det/datasets/cocosplit/) \] 19 | - [VOC Dataset](detection/voc/README.md) \[ [Homepage](http://host.robots.ox.ac.uk/pascal/VOC/) \] \[ [Split](http://dl.yf.io/fs-det/datasets/vocsplit/) \] 20 | -------------------------------------------------------------------------------- 
/tools/data/classification/README.md: -------------------------------------------------------------------------------- 1 | # Data Preparation for Few Shot Classification 2 | 3 | It is recommended to symlink the dataset root to `$MMFEWSHOT/data`. 4 | If your folder structure is different, you may need to change the corresponding paths in config files. 5 | 6 | Datasets supported in MMFewShot: 7 | 8 | - [CUB Dataset](cub/README.md) \[ [Homepage](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) \] 9 | - [Mini ImageNet Dataset](mini-imagenet/README.md) \[ [Homepage](https://image-net.org/challenges/LSVRC/2012/) \] \[ [Split](https://github.com/twitter/meta-learning-lstm/tree/master/data/miniImagenet) \] 10 | - [Tiered ImageNet Dataset](tiered-imagenet/README.md) \[ [Homepage](https://image-net.org/challenges/LSVRC/2012/) \] \[ [Split](https://github.com/renmengye/few-shot-ssl-public#tieredimagenet) \] 11 | -------------------------------------------------------------------------------- /tools/data/classification/cub/README.md: -------------------------------------------------------------------------------- 1 | # Preparing CUB Dataset 2 | 3 | 4 | 5 | ```bibtex 6 | @techreport{WahCUB_200_2011, 7 | Title = {{The Caltech-UCSD Birds-200-2011 Dataset}}, 8 | Author = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. and Belongie, S.}, 9 | Year = {2011} 10 | Institution = {California Institute of Technology}, 11 | Number = {CNS-TR-2011-001} 12 | } 13 | ``` 14 | 15 | The CUB dataset can be downloaded from [here](http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz). 16 | 17 | The data structure is as follows: 18 | 19 | ```text 20 | rsifewshot 21 | ├── rsifewshot 22 | ├── tools 23 | ├── configs 24 | ├── data 25 | │ ├── CUB_200_2011 26 | │ │ ├── images 27 | │ │ │ ├── 001.Black_footed_Albatross 28 | ... 
29 | ``` 30 | -------------------------------------------------------------------------------- /tools/data/classification/mini-imagenet/README.md: -------------------------------------------------------------------------------- 1 | # Preparing Mini-ImageNet Dataset 2 | 3 | 4 | 5 | ```bibtex 6 | @inproceedings{ren18fewshotssl, 7 | author = {Mengye Ren and Eleni Triantafillou and Sachin Ravi and Jake Snell and Kevin Swersky and Joshua B. Tenenbaum and Hugo Larochelle and Richard S. Zemel}, 8 | title = {Meta-Learning for Semi-Supervised Few-Shot Classification}, 9 | booktitle = {Proceedings of 6th International Conference on Learning Representations {ICLR}}, 10 | year = {2018}, 11 | } 12 | 13 | @article{ILSVRC15, 14 | Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei}, 15 | Title = {{ImageNet Large Scale Visual Recognition Challenge}}, 16 | Year = {2015}, 17 | journal = {International Journal of Computer Vision (IJCV)}, 18 | doi = {10.1007/s11263-015-0816-y}, 19 | volume = {115}, 20 | number = {3}, 21 | pages = {211-252} 22 | } 23 | ``` 24 | 25 | The split files of mini-imagenet can be downloaded from [here](https://github.com/twitter-research/meta-learning-lstm/tree/master/data/miniImagenet). 26 | The whole imagenet dataset can be downloaded from [here](https://image-net.org/challenges/LSVRC/2012/index.php). 27 | 28 | The data structure is as follows: 29 | 30 | ```text 31 | rsifewshot 32 | ├── rsifewshot 33 | ├── configs 34 | ├── data 35 | │ ├── mini_imagenet 36 | │ │ ├── images 37 | │ │ │ ├── n01440764 38 | │ │ │ │ ├── n01440764_10026.JPEG 39 | │ │ │ │ ├── ... 40 | │ │ │ ├── n01443537 41 | │ │ │ ├── ... 42 | │ │ ├── test.csv 43 | │ │ ├── train.csv 44 | │ │ ├── val.csv 45 | ... 
46 | ``` 47 | -------------------------------------------------------------------------------- /tools/data/classification/tiered-imagenet/README.md: -------------------------------------------------------------------------------- 1 | # Preparing Tiered ImageNet Dataset 2 | 3 | 4 | 5 | ```bibtex 6 | @inproceedings{ren18fewshotssl, 7 | author = {Mengye Ren and Eleni Triantafillou and Sachin Ravi and Jake Snell and Kevin Swersky and Joshua B. Tenenbaum and Hugo Larochelle and Richard S. Zemel}, 8 | title = {Meta-Learning for Semi-Supervised Few-Shot Classification}, 9 | booktitle = {Proceedings of 6th International Conference on Learning Representations {ICLR}}, 10 | year = {2018}, 11 | } 12 | 13 | @article{ILSVRC15, 14 | Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei}, 15 | Title = {{ImageNet Large Scale Visual Recognition Challenge}}, 16 | Year = {2015}, 17 | journal = {International Journal of Computer Vision (IJCV)}, 18 | doi = {10.1007/s11263-015-0816-y}, 19 | volume = {115}, 20 | number = {3}, 21 | pages = {211-252} 22 | } 23 | ``` 24 | 25 | The pickle file of tiered imagenet dataset is released in [repo](https://github.com/renmengye/few-shot-ssl-public#tieredimagenet) can be downloaded from [here](https://drive.google.com/open?id=1g1aIDy2Ar_MViF2gDXFYDBTR-HYecV07). 26 | The data structure is as follows: 27 | 28 | ```text 29 | rsifewshot 30 | ├── rsifewshot 31 | ├── tools 32 | ├── configs 33 | ├── data 34 | │ ├── tiered_imagenet 35 | │ │ ├── train_images_png.pkl 36 | │ │ ├── train_labels.pkl 37 | │ │ ├── val_images_png.pkl 38 | │ │ ├── val_labels.pkl 39 | │ │ ├── test_images_png.pkl 40 | │ │ ├── test_labels.pkl 41 | ... 
# Copyright (c) OpenMMLab. All rights reserved.
"""Unzip tiered imagenet dataset from pickle file."""

import argparse
import os
import pickle

import mmcv


def main():
    """Extract per-image byte blobs from the tiered-imagenet pickles.

    For each of the train/test/val subsets, reads
    ``{dir}/{subset}_images_png.pkl`` (a pickled list of raw image byte
    strings) and writes every entry to
    ``{dir}/{subset}/{subset}_image_{i}.byte``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dir',
        default='data/tiered_imagenet',
        help='the directory to tiered imagenet')
    args = parser.parse_args()
    data_prefix = args.dir
    for subset in ['train', 'test', 'val']:
        pkl_path = os.path.join(data_prefix, f'{subset}_images_png.pkl')
        os.makedirs(os.path.join(data_prefix, subset), exist_ok=True)
        print(f'unzipping {subset} file...')
        # Keep the file handle and the unpickled list under distinct names
        # (the original rebound one name to both) and close the pickle file
        # before starting the long write loop.
        with open(pkl_path, 'rb') as pkl_file:
            img_bytes = pickle.load(pkl_file)
        prog_bar = mmcv.ProgressBar(len(img_bytes))
        for i, img in enumerate(img_bytes):
            filename = os.path.join(data_prefix, subset,
                                    f'{subset}_image_{i}.byte')
            # write bytes to file
            with open(filename, 'wb') as binary_file:
                binary_file.write(img)
            prog_bar.update()


if __name__ == '__main__':
    main()
4 | If your folder structure is different, you may need to change the corresponding paths in config files. 5 | 6 | Datasets supported in MMFewShot: 7 | 8 | - [COCO Dataset](coco/README.md) \[ [Homepage](https://cocodataset.org/#home) \] \[ [Split](http://dl.yf.io/fs-det/datasets/cocosplit/) \] 9 | - [VOC Dataset](voc/README.md) \[ [Homepage](http://host.robots.ox.ac.uk/pascal/VOC/) \] \[ [Split](http://dl.yf.io/fs-det/datasets/vocsplit/) \] 10 | -------------------------------------------------------------------------------- /tools/data/detection/coco/README.md: -------------------------------------------------------------------------------- 1 | # Preparing COCO Dataset 2 | 3 | 4 | 5 | ```bibtex 6 | @inproceedings{lin2014microsoft, 7 | title={Microsoft coco: Common objects in context}, 8 | author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence}, 9 | booktitle={European conference on computer vision}, 10 | pages={740--755}, 11 | year={2014}, 12 | organization={Springer} 13 | } 14 | 15 | @inproceedings{kang2019few, 16 | title={Few-shot Object Detection via Feature Reweighting}, 17 | author={Kang, Bingyi and Liu, Zhuang and Wang, Xin and Yu, Fisher and Feng, Jiashi and Darrell, Trevor}, 18 | booktitle={ICCV}, 19 | year={2019} 20 | } 21 | ``` 22 | 23 | ## download coco dataset 24 | 25 | The coco14/coco17 dataset can be downloaded from [here](https://cocodataset.org/#download). 26 | 27 | In rsifewshot, coco14 is used as default setting, while coco17 is optional. 28 | Some methods (attention rpn) were proposed with coco17 data split, which is also evaluated in rsifewshot. 
29 | 30 | The data structure is as follows: 31 | 32 | ```none 33 | rsifewshot 34 | ├── rsifewshot 35 | ├── tools 36 | ├── configs 37 | ├── data 38 | │ ├── coco 39 | │ │ ├── annotations 40 | │ │ ├── train2014 41 | │ │ ├── val2014 42 | │ │ ├── train2017 (optional) 43 | │ │ ├── val2017 (optional) 44 | ``` 45 | 46 | ## download few shot annotations 47 | 48 | In rsifewshot, we use the train/val/few shot split of coco14 released in TFA [repo](https://github.com/ucbdrive/few-shot-object-detection). 49 | The original data spilt can be found in [here](http://dl.yf.io/fs-det/datasets/cocosplit/). 50 | 51 | We provide a re-organized data split. 52 | Please download [coco.tar.gz](https://download.openmmlab.com/rsifewshot/few_shot_ann/coco.tar.gz) 53 | and unzip them into `$MMFEWSHOT/data/few_shot_ann`. 54 | 55 | The final data structure is as follows: 56 | 57 | ```none 58 | rsifewshot 59 | ├── rsifewshot 60 | ├── tools 61 | ├── configs 62 | ├── data 63 | │ ├── coco 64 | │ │ ├── annotations 65 | │ │ ├── train2014 66 | │ │ ├── val2014 67 | │ │ ├── train2017 (optional) 68 | │ │ ├── val2017 (optional) 69 | │ ├── few_shot_ann 70 | │ │ ├── coco 71 | │ │ │ ├── annotations 72 | │ │ │ │ ├── train.json 73 | │ │ │ │ ├── val.json 74 | │ │ │ ├── attention_rpn_10shot (for coco17) 75 | │ │ │ ├── benchmark_10shot 76 | │ │ │ ├── benchmark_30shot 77 | ``` 78 | -------------------------------------------------------------------------------- /tools/data/detection/voc/README.md: -------------------------------------------------------------------------------- 1 | # Few Shot Detection Data Preparation 2 | 3 | 4 | 5 | ```bibtex 6 | @article{everingham2010pascal, 7 | title={The pascal visual object classes (voc) challenge}, 8 | author={Everingham, Mark and Van Gool, Luc and Williams, Christopher KI and Winn, John and Zisserman, Andrew}, 9 | journal={International journal of computer vision}, 10 | volume={88}, 11 | number={2}, 12 | pages={303--338}, 13 | year={2010}, 14 | publisher={Springer} 15 | } 16 
| 17 | @inproceedings{kang2019few, 18 | title={Few-shot Object Detection via Feature Reweighting}, 19 | author={Kang, Bingyi and Liu, Zhuang and Wang, Xin and Yu, Fisher and Feng, Jiashi and Darrell, Trevor}, 20 | booktitle={ICCV}, 21 | year={2019} 22 | } 23 | ``` 24 | 25 | ## download VOC dataset 26 | 27 | The VOC 2007/2012 dataset can be downloaded from [here](http://host.robots.ox.ac.uk/pascal/VOC/). 28 | 29 | In rsifewshot, VOC 2007(trainval) + 2012 (trainval) are used for training and VOC 2007(test) is used for evaluation. 30 | 31 | The data structure is as follows: 32 | 33 | ```none 34 | rsifewshot 35 | ├── rsifewshot 36 | ├── tools 37 | ├── configs 38 | ├── data 39 | │ ├── VOCdevkit 40 | │ │ ├── VOC2007 41 | │ │ ├── VOC2012 42 | ``` 43 | 44 | ## download few shot annotations 45 | 46 | In rsifewshot, we use the VOC few shot split released in TFA [repo](https://github.com/ucbdrive/few-shot-object-detection). 47 | The original data spilt can be found in [here](http://dl.yf.io/fs-det/datasets/vocsplit/). 48 | 49 | We provide a re-organized data split. 50 | Please download [voc.tar.gz](https://download.openmmlab.com/rsifewshot/few_shot_ann/voc.tar.gz) 51 | and unzip them into `$MMFEWSHOT/data/few_shot_ann`. 
52 | 53 | The final data structure is as follows: 54 | 55 | ```none 56 | rsifewshot 57 | ├── rsifewshot 58 | ├── tools 59 | ├── configs 60 | ├── data 61 | │ ├── VOCdevkit 62 | │ │ ├── VOC2007 63 | │ │ ├── VOC2012 64 | │ ├── few_shot_ann 65 | │ │ ├── voc 66 | │ │ │ ├── benchmark_1shot 67 | │ │ │ ├── benchmark_2shot 68 | │ │ │ ├── benchmark_3shot 69 | │ │ │ ├── benchmark_5shot 70 | │ │ │ ├── benchmark_10shot 71 | ``` 72 | -------------------------------------------------------------------------------- /tools/detection/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | NNODES=${NNODES:-1} 7 | NODE_RANK=${NODE_RANK:-0} 8 | PORT=${PORT:-29500} 9 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 10 | 11 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 12 | python -m torch.distributed.launch \ 13 | --nnodes=$NNODES \ 14 | --node_rank=$NODE_RANK \ 15 | --master_addr=$MASTER_ADDR \ 16 | --nproc_per_node=$GPUS \ 17 | --master_port=$PORT \ 18 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 19 | -------------------------------------------------------------------------------- /tools/detection/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | NNODES=${NNODES:-1} 6 | NODE_RANK=${NODE_RANK:-0} 7 | PORT=${PORT:-29500} 8 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 9 | 10 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 11 | python -m torch.distributed.launch \ 12 | --nnodes=$NNODES \ 13 | --node_rank=$NODE_RANK \ 14 | --master_addr=$MASTER_ADDR \ 15 | --nproc_per_node=$GPUS \ 16 | --master_port=$PORT \ 17 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 18 | -------------------------------------------------------------------------------- /tools/detection/slurm_test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/detection/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /tools/detection/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | WORK_DIR=$4 9 | GPUS=${GPUS:-8} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-8} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | SRUN_ARGS=${SRUN_ARGS:-""} 13 | PY_ARGS=${@:5} 14 | 15 | PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/detection/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /tools/misc/print_config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
import argparse
import warnings

from mmcv import Config, DictAction


def parse_args():
    """Build and validate the command-line arguments.

    ``--options`` is kept only for backward compatibility: it is folded
    into ``--cfg-options``, and passing both at once is rejected.
    """
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()

    # Migrate the deprecated flag; using both spellings at once is an error.
    if args.options:
        if args.cfg_options:
            raise ValueError(
                '--options and --cfg-options cannot be both '
                'specified, --options is deprecated in favor of --cfg-options')
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args


def main():
    """Load the config file, apply overrides, and print its full text."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    custom_imports = cfg.get('custom_imports', None)
    if custom_imports:
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**custom_imports)
    print(f'Config:\n{cfg.pretty_text}')


if __name__ == '__main__':
    main()