├── .gitignore ├── GETTING_STARTED.md ├── INSTALL.md ├── LICENSE ├── MODEL_ZOO.md ├── README.md ├── assets ├── DATA.md ├── EVALUATION.md ├── RIS.png ├── Ref-vos.png ├── TRAIN.md ├── VOS.png ├── network.png ├── results.png └── zero-few-shot.png ├── configs ├── Base-RCNN-C4.yaml ├── Base-RCNN-DilatedC5.yaml ├── Base-RCNN-FPN.yaml ├── Base-RetinaNet.yaml ├── COCO-Detection │ ├── fast_rcnn_R_50_FPN_1x.yaml │ ├── faster_rcnn_R_101_C4_3x.yaml │ ├── faster_rcnn_R_101_DC5_3x.yaml │ ├── faster_rcnn_R_101_FPN_3x.yaml │ ├── faster_rcnn_R_50_C4_1x.yaml │ ├── faster_rcnn_R_50_C4_3x.yaml │ ├── faster_rcnn_R_50_DC5_1x.yaml │ ├── faster_rcnn_R_50_DC5_3x.yaml │ ├── faster_rcnn_R_50_FPN_1x.yaml │ ├── faster_rcnn_R_50_FPN_3x.yaml │ ├── faster_rcnn_X_101_32x8d_FPN_3x.yaml │ ├── fcos_R_50_FPN_1x.py │ ├── retinanet_R_101_FPN_3x.yaml │ ├── retinanet_R_50_FPN_1x.py │ ├── retinanet_R_50_FPN_1x.yaml │ ├── retinanet_R_50_FPN_3x.yaml │ ├── rpn_R_50_C4_1x.yaml │ └── rpn_R_50_FPN_1x.yaml ├── COCO-InstanceSegmentation │ ├── mask_rcnn_R_101_C4_3x.yaml │ ├── mask_rcnn_R_101_DC5_3x.yaml │ ├── mask_rcnn_R_101_FPN_3x.yaml │ ├── mask_rcnn_R_50_C4_1x.py │ ├── mask_rcnn_R_50_C4_1x.yaml │ ├── mask_rcnn_R_50_C4_3x.yaml │ ├── mask_rcnn_R_50_DC5_1x.yaml │ ├── mask_rcnn_R_50_DC5_3x.yaml │ ├── mask_rcnn_R_50_FPN_1x.py │ ├── mask_rcnn_R_50_FPN_1x.yaml │ ├── mask_rcnn_R_50_FPN_1x_giou.yaml │ ├── mask_rcnn_R_50_FPN_3x.yaml │ ├── mask_rcnn_X_101_32x8d_FPN_3x.yaml │ ├── mask_rcnn_regnetx_4gf_dds_fpn_1x.py │ └── mask_rcnn_regnety_4gf_dds_fpn_1x.py ├── COCO-Keypoints │ ├── Base-Keypoint-RCNN-FPN.yaml │ ├── keypoint_rcnn_R_101_FPN_3x.yaml │ ├── keypoint_rcnn_R_50_FPN_1x.py │ ├── keypoint_rcnn_R_50_FPN_1x.yaml │ ├── keypoint_rcnn_R_50_FPN_3x.yaml │ └── keypoint_rcnn_X_101_32x8d_FPN_3x.yaml ├── COCO-PanopticSegmentation │ ├── Base-Panoptic-FPN.yaml │ ├── panoptic_fpn_R_101_3x.yaml │ ├── panoptic_fpn_R_50_1x.py │ ├── panoptic_fpn_R_50_1x.yaml │ └── panoptic_fpn_R_50_3x.yaml ├── Cityscapes │ └── mask_rcnn_R_50_FPN.yaml ├── Detectron1-Comparisons │ ├── README.md │ ├── faster_rcnn_R_50_FPN_noaug_1x.yaml │ ├── keypoint_rcnn_R_50_FPN_1x.yaml │ └── mask_rcnn_R_50_FPN_noaug_1x.yaml ├── LVISv0.5-InstanceSegmentation │ ├── mask_rcnn_R_101_FPN_1x.yaml │ ├── mask_rcnn_R_50_FPN_1x.yaml │ └── mask_rcnn_X_101_32x8d_FPN_1x.yaml ├── LVISv1-InstanceSegmentation │ ├── mask_rcnn_R_101_FPN_1x.yaml │ ├── mask_rcnn_R_50_FPN_1x.yaml │ └── mask_rcnn_X_101_32x8d_FPN_1x.yaml ├── Misc │ ├── cascade_mask_rcnn_R_50_FPN_1x.yaml │ ├── cascade_mask_rcnn_R_50_FPN_3x.yaml │ ├── cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml │ ├── mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml │ ├── mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml │ ├── mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml │ ├── mask_rcnn_R_50_FPN_3x_gn.yaml │ ├── mask_rcnn_R_50_FPN_3x_syncbn.yaml │ ├── mmdet_mask_rcnn_R_50_FPN_1x.py │ ├── panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml │ ├── scratch_mask_rcnn_R_50_FPN_3x_gn.yaml │ ├── scratch_mask_rcnn_R_50_FPN_9x_gn.yaml │ ├── scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml │ ├── semantic_R_50_FPN_1x.yaml │ └── torchvision_imagenet_R_50.py ├── PascalVOC-Detection │ ├── faster_rcnn_R_50_C4.yaml │ └── faster_rcnn_R_50_FPN.yaml ├── common │ ├── README.md │ ├── coco_schedule.py │ ├── data │ │ ├── coco.py │ │ ├── coco_keypoint.py │ │ └── coco_panoptic_separated.py │ ├── models │ │ ├── cascade_rcnn.py │ │ ├── fcos.py │ │ ├── keypoint_rcnn_fpn.py │ │ ├── mask_rcnn_c4.py │ │ ├── mask_rcnn_fpn.py │ │ ├── panoptic_fpn.py │ │ └── retinanet.py │ ├── optim.py │ └── train.py ├── new_baselines │ ├── 
mask_rcnn_R_101_FPN_100ep_LSJ.py │ ├── mask_rcnn_R_101_FPN_200ep_LSJ.py │ ├── mask_rcnn_R_101_FPN_400ep_LSJ.py │ ├── mask_rcnn_R_50_FPN_100ep_LSJ.py │ ├── mask_rcnn_R_50_FPN_200ep_LSJ.py │ ├── mask_rcnn_R_50_FPN_400ep_LSJ.py │ ├── mask_rcnn_R_50_FPN_50ep_LSJ.py │ ├── mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py │ ├── mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py │ ├── mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py │ ├── mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py │ ├── mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py │ └── mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py └── quick_schedules │ ├── README.md │ ├── cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml │ ├── cascade_mask_rcnn_R_50_FPN_instant_test.yaml │ ├── fast_rcnn_R_50_FPN_inference_acc_test.yaml │ ├── fast_rcnn_R_50_FPN_instant_test.yaml │ ├── keypoint_rcnn_R_50_FPN_inference_acc_test.yaml │ ├── keypoint_rcnn_R_50_FPN_instant_test.yaml │ ├── keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml │ ├── keypoint_rcnn_R_50_FPN_training_acc_test.yaml │ ├── mask_rcnn_R_50_C4_GCV_instant_test.yaml │ ├── mask_rcnn_R_50_C4_inference_acc_test.yaml │ ├── mask_rcnn_R_50_C4_instant_test.yaml │ ├── mask_rcnn_R_50_C4_training_acc_test.yaml │ ├── mask_rcnn_R_50_DC5_inference_acc_test.yaml │ ├── mask_rcnn_R_50_FPN_inference_acc_test.yaml │ ├── mask_rcnn_R_50_FPN_instant_test.yaml │ ├── mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml │ ├── mask_rcnn_R_50_FPN_training_acc_test.yaml │ ├── panoptic_fpn_R_50_inference_acc_test.yaml │ ├── panoptic_fpn_R_50_instant_test.yaml │ ├── panoptic_fpn_R_50_training_acc_test.yaml │ ├── retinanet_R_50_FPN_inference_acc_test.yaml │ ├── retinanet_R_50_FPN_instant_test.yaml │ ├── rpn_R_50_FPN_inference_acc_test.yaml │ ├── rpn_R_50_FPN_instant_test.yaml │ ├── semantic_R_50_FPN_inference_acc_test.yaml │ ├── semantic_R_50_FPN_instant_test.yaml │ └── semantic_R_50_FPN_training_acc_test.yaml ├── conversion ├── convert_coco2cocovid.py ├── convert_davis2ytvis.py ├── convert_fss2coco.py ├── convert_lvos2ytvis_vos.py ├── convert_lvos2ytvis_vos_val.py ├── convert_mix_ref.py ├── convert_mose2ytvis.py ├── convert_mose2ytvis_val.py ├── convert_ref2coco.py ├── convert_ref2cocovid.py ├── convert_refdavis2refytvos.py ├── convert_refdavis2ytvis_val.py ├── convert_refytvos2cocovid.py ├── convert_refytvos2cocovid_val.py ├── convert_vg2coco.py ├── convert_vg2cocovid.py ├── convert_ytvos2ytvis.py ├── convert_ytvos2ytvis_val.py ├── download_objects365_v2.py └── models │ └── convert_pth2pkl.py ├── datasets ├── README.md ├── prepare_ade20k_sem_seg.py ├── prepare_cocofied_lvis.py ├── prepare_for_tests.sh └── prepare_panoptic_fpn.py ├── demo ├── README.md ├── demo.py └── predictor.py ├── detectron2 ├── __init__.py ├── checkpoint │ ├── __init__.py │ ├── c2_model_loading.py │ ├── catalog.py │ └── detection_checkpoint.py ├── config │ ├── __init__.py │ ├── compat.py │ ├── config.py │ ├── defaults.py │ ├── instantiate.py │ └── lazy.py ├── data │ ├── __init__.py │ ├── benchmark.py │ ├── build.py │ ├── catalog.py │ ├── common.py │ ├── dataset_mapper.py │ ├── datasets │ │ ├── README.md │ │ ├── __init__.py │ │ ├── builtin.py │ │ ├── builtin_meta.py │ │ ├── cityscapes.py │ │ ├── cityscapes_panoptic.py │ │ ├── coco.py │ │ ├── coco_panoptic.py │ │ ├── lvis.py │ │ ├── lvis_v0_5_categories.py │ │ ├── lvis_v1_categories.py │ │ ├── pascal_voc.py │ │ └── register_coco.py │ ├── detection_utils.py │ ├── samplers │ │ ├── __init__.py │ │ ├── distributed_sampler.py │ │ └── grouped_batch_sampler.py │ └── transforms │ │ ├── __init__.py │ │ ├── augmentation.py │ │ ├── 
augmentation_impl.py │ │ └── transform.py ├── engine │ ├── __init__.py │ ├── defaults.py │ ├── hooks.py │ ├── launch.py │ └── train_loop.py ├── evaluation │ ├── __init__.py │ ├── cityscapes_evaluation.py │ ├── coco_evaluation.py │ ├── evaluator.py │ ├── fast_eval_api.py │ ├── lvis_evaluation.py │ ├── panoptic_evaluation.py │ ├── pascal_voc_evaluation.py │ ├── refcocoeval.py │ ├── rotated_coco_evaluation.py │ ├── sem_seg_evaluation.py │ └── testing.py ├── export │ ├── README.md │ ├── __init__.py │ ├── api.py │ ├── c10.py │ ├── caffe2_export.py │ ├── caffe2_inference.py │ ├── caffe2_modeling.py │ ├── caffe2_patch.py │ ├── flatten.py │ ├── shared.py │ ├── torchscript.py │ └── torchscript_patch.py ├── layers │ ├── __init__.py │ ├── aspp.py │ ├── batch_norm.py │ ├── blocks.py │ ├── csrc │ │ ├── README.md │ │ ├── ROIAlignRotated │ │ │ ├── ROIAlignRotated.h │ │ │ ├── ROIAlignRotated_cpu.cpp │ │ │ └── ROIAlignRotated_cuda.cu │ │ ├── box_iou_rotated │ │ │ ├── box_iou_rotated.h │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ └── box_iou_rotated_utils.h │ │ ├── cocoeval │ │ │ ├── cocoeval.cpp │ │ │ └── cocoeval.h │ │ ├── cuda_version.cu │ │ ├── deformable │ │ │ ├── deform_conv.h │ │ │ ├── deform_conv_cuda.cu │ │ │ └── deform_conv_cuda_kernel.cu │ │ ├── nms_rotated │ │ │ ├── nms_rotated.h │ │ │ ├── nms_rotated_cpu.cpp │ │ │ └── nms_rotated_cuda.cu │ │ └── vision.cpp │ ├── deform_conv.py │ ├── losses.py │ ├── mask_ops.py │ ├── nms.py │ ├── roi_align.py │ ├── roi_align_rotated.py │ ├── rotated_boxes.py │ ├── shape_spec.py │ └── wrappers.py ├── model_zoo │ ├── __init__.py │ └── model_zoo.py ├── modeling │ ├── __init__.py │ ├── anchor_generator.py │ ├── backbone │ │ ├── __init__.py │ │ ├── backbone.py │ │ ├── build.py │ │ ├── fpn.py │ │ ├── regnet.py │ │ ├── resnet.py │ │ ├── utils.py │ │ └── vit.py │ ├── box_regression.py │ ├── matcher.py │ ├── meta_arch │ │ ├── __init__.py │ │ ├── build.py │ │ ├── dense_detector.py │ │ ├── fcos.py │ │ ├── panoptic_fpn.py │ │ ├── rcnn.py │ │ ├── retinanet.py │ │ └── semantic_seg.py │ ├── mmdet_wrapper.py │ ├── poolers.py │ ├── postprocessing.py │ ├── proposal_generator │ │ ├── __init__.py │ │ ├── build.py │ │ ├── proposal_utils.py │ │ ├── rpn.py │ │ └── rrpn.py │ ├── roi_heads │ │ ├── __init__.py │ │ ├── box_head.py │ │ ├── cascade_rcnn.py │ │ ├── fast_rcnn.py │ │ ├── keypoint_head.py │ │ ├── mask_head.py │ │ ├── roi_heads.py │ │ └── rotated_fast_rcnn.py │ ├── sampling.py │ └── test_time_augmentation.py ├── projects │ ├── README.md │ └── __init__.py ├── solver │ ├── __init__.py │ ├── build.py │ └── lr_scheduler.py ├── structures │ ├── __init__.py │ ├── boxes.py │ ├── image_list.py │ ├── instances.py │ ├── keypoints.py │ ├── masks.py │ └── rotated_boxes.py ├── tracking │ ├── __init__.py │ ├── base_tracker.py │ ├── bbox_iou_tracker.py │ ├── hungarian_tracker.py │ ├── iou_weighted_hungarian_bbox_iou_tracker.py │ ├── utils.py │ └── vanilla_hungarian_bbox_iou_tracker.py └── utils │ ├── README.md │ ├── __init__.py │ ├── analysis.py │ ├── collect_env.py │ ├── colormap.py │ ├── comm.py │ ├── develop.py │ ├── env.py │ ├── events.py │ ├── file_io.py │ ├── logger.py │ ├── memory.py │ ├── registry.py │ ├── serialize.py │ ├── testing.py │ ├── video_visualizer.py │ └── visualizer.py ├── dev ├── README.md ├── linter.sh ├── packaging │ ├── README.md │ ├── build_all_wheels.sh │ ├── build_wheel.sh │ ├── gen_install_table.py │ ├── gen_wheel_index.sh │ └── pkg_helpers.bash ├── parse_results.sh ├── run_inference_tests.sh └── run_instant_tests.sh ├── docker ├── 
Dockerfile ├── README.md ├── deploy.Dockerfile └── docker-compose.yml ├── docs ├── .gitignore ├── Makefile ├── README.md ├── _static │ └── css │ │ └── custom.css ├── conf.py ├── index.rst ├── modules │ ├── checkpoint.rst │ ├── config.rst │ ├── data.rst │ ├── data_transforms.rst │ ├── engine.rst │ ├── evaluation.rst │ ├── export.rst │ ├── fvcore.rst │ ├── index.rst │ ├── layers.rst │ ├── model_zoo.rst │ ├── modeling.rst │ ├── solver.rst │ ├── structures.rst │ └── utils.rst ├── notes │ ├── benchmarks.md │ ├── changelog.md │ ├── compatibility.md │ ├── contributing.md │ └── index.rst └── tutorials │ ├── README.md │ ├── augmentation.md │ ├── builtin_datasets.md │ ├── configs.md │ ├── data_loading.md │ ├── datasets.md │ ├── deployment.md │ ├── evaluation.md │ ├── extend.md │ ├── getting_started.md │ ├── index.rst │ ├── install.md │ ├── lazyconfigs.md │ ├── models.md │ ├── training.md │ └── write-models.md ├── external ├── davis2017-evaluation │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── davis2017 │ │ ├── __init__.py │ │ ├── davis.py │ │ ├── evaluation.py │ │ ├── metrics.py │ │ ├── results.py │ │ └── utils.py │ ├── evaluation_codalab.py │ ├── evaluation_method.py │ ├── pytest │ │ └── test_evaluation.py │ ├── setup.cfg │ └── setup.py └── lvos-evaluation │ ├── .DS_Store │ ├── README.md │ ├── evaluation_codalab.py │ ├── evaluation_method.py │ ├── lvos │ ├── __init__.py │ ├── evaluation.py │ ├── evaluation_mp.py │ ├── lvos_seperate.py │ ├── metrics.py │ ├── results.py │ └── utils.py │ ├── setup.cfg │ ├── setup.py │ └── unseen_videos.txt ├── projects ├── .DS_Store ├── README.md └── UniRef │ ├── configs │ ├── eval │ │ ├── r50 │ │ │ ├── eval_fss_r50.yaml │ │ │ ├── eval_rec_r50.yaml │ │ │ ├── eval_rvos_r50.yaml │ │ │ └── eval_vos_r50.yaml │ │ └── swin-l │ │ │ ├── eval_fss_swin-l.yaml │ │ │ ├── eval_rec_swin-l.yaml │ │ │ ├── eval_rvos_swin-l.yaml │ │ │ └── eval_vos_swin-l.yaml │ ├── image │ │ ├── joint_task_det_rec_r50_16gpu.yaml │ │ ├── joint_task_det_rec_swin-l_16gpu.yaml │ │ ├── joint_task_finetune_det_rec_fss_r50_16gpu.yaml │ │ └── joint_task_finetune_det_rec_fss_swin-l_16gpu.yaml │ ├── pretrain │ │ ├── obj365v2_r50_32gpu.yaml │ │ └── obj365v2_swin-l_32gpu.yaml │ ├── sam │ │ ├── eval │ │ │ ├── eval_sam_fss.yaml │ │ │ ├── eval_sam_rec.yaml │ │ │ ├── eval_sam_rvos.yaml │ │ │ └── eval_sam_vos.yaml │ │ ├── sam_image_joint_rec_fss_8gpu.yaml │ │ └── sam_video_joint_vos_rvos_8gpu.yaml │ └── video │ │ ├── joint_task_vos_rvos_r50_16gpu.yaml │ │ └── joint_task_vos_rvos_swin-l_16gpu.yaml │ ├── predictor.py │ ├── train_net.py │ └── uniref │ ├── __init__.py │ ├── backbone │ ├── __init__.py │ ├── convnext.py │ ├── masked_backbone.py │ ├── pos_embed.py │ ├── swin.py │ ├── vit.py │ └── vit_utils.py │ ├── config.py │ ├── data │ ├── __init__.py │ ├── augmentation.py │ ├── build.py │ ├── coco_dataset_mapper.py │ ├── custom_dataset_dataloader.py │ ├── dataset_mapper.py │ ├── datasets │ │ ├── __init__.py │ │ ├── builtin.py │ │ ├── fss.py │ │ ├── objects365.py │ │ ├── objects365_v2.py │ │ ├── refcoco.py │ │ └── ytvis.py │ ├── mixup.py │ ├── sam_dataset_mapper.py │ ├── ytvis_dataset_mapper.py │ ├── ytvis_eval.py │ └── ytvis_sam_dataset_mapper.py │ ├── models │ ├── conv_with_kaiming_uniform.py │ ├── ddetrs.py │ ├── deformable_detr │ │ ├── __init__.py │ │ ├── backbone.py │ │ ├── criterion.py │ │ ├── deformable_detr.py │ │ ├── deformable_transformer.py │ │ ├── matcher.py │ │ ├── ops │ │ │ ├── functions │ │ │ │ ├── __init__.py │ │ │ │ └── ms_deform_attn_func.py │ │ │ ├── make.sh │ │ │ ├── modules │ │ │ │ ├── __init__.py 
│ │ │ │ └── ms_deform_attn.py │ │ │ ├── setup.py │ │ │ ├── src │ │ │ │ ├── cpu │ │ │ │ │ ├── ms_deform_attn_cpu.cpp │ │ │ │ │ └── ms_deform_attn_cpu.h │ │ │ │ ├── cuda │ │ │ │ │ ├── ms_deform_attn_cuda.cu │ │ │ │ │ ├── ms_deform_attn_cuda.h │ │ │ │ │ └── ms_deform_im2col_cuda.cuh │ │ │ │ ├── ms_deform_attn.h │ │ │ │ └── vision.cpp │ │ │ └── test.py │ │ ├── position_encoding.py │ │ └── segmentation.py │ ├── fuse_helper │ │ ├── __init__.py │ │ ├── flash_attention.py │ │ ├── fuse_helper.py │ │ └── unifusion.py │ ├── segment_anything │ │ ├── __init__.py │ │ ├── automatic_mask_generator.py │ │ ├── build_sam.py │ │ ├── modeling │ │ │ ├── __init__.py │ │ │ ├── common.py │ │ │ ├── image_encoder.py │ │ │ ├── mask_decoder.py │ │ │ ├── prompt_encoder.py │ │ │ ├── sam.py │ │ │ └── transformer.py │ │ ├── predictor.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── amg.py │ │ │ ├── onnx.py │ │ │ └── transforms.py │ ├── uniref_sam.py │ └── vos_helper │ │ ├── __init__.py │ │ ├── cbam.py │ │ ├── losses.py │ │ ├── mod_resnet.py │ │ └── modules.py │ ├── uniref.py │ ├── uniref_sam.py │ └── util │ ├── __init__.py │ ├── box_ops.py │ ├── misc.py │ └── plot_utils.py ├── requirements.txt ├── setup.cfg ├── setup.py ├── tests ├── README.md ├── __init__.py ├── config │ ├── dir1 │ │ ├── dir1_a.py │ │ └── dir1_b.py │ ├── root_cfg.py │ ├── test_instantiate_config.py │ ├── test_lazy_config.py │ └── test_yacs_config.py ├── data │ ├── __init__.py │ ├── test_coco.py │ ├── test_coco_evaluation.py │ ├── test_dataset.py │ ├── test_detection_utils.py │ ├── test_rotation_transform.py │ ├── test_sampler.py │ └── test_transforms.py ├── layers │ ├── __init__.py │ ├── test_blocks.py │ ├── test_deformable.py │ ├── test_losses.py │ ├── test_mask_ops.py │ ├── test_nms.py │ ├── test_nms_rotated.py │ ├── test_roi_align.py │ └── test_roi_align_rotated.py ├── modeling │ ├── __init__.py │ ├── test_anchor_generator.py │ ├── test_backbone.py │ ├── test_box2box_transform.py │ ├── test_fast_rcnn.py │ ├── test_matcher.py │ ├── test_mmdet.py │ ├── test_model_e2e.py │ ├── test_roi_heads.py │ ├── test_roi_pooler.py │ └── test_rpn.py ├── structures │ ├── __init__.py │ ├── test_boxes.py │ ├── test_imagelist.py │ ├── test_instances.py │ ├── test_keypoints.py │ ├── test_masks.py │ └── test_rotated_boxes.py ├── test_checkpoint.py ├── test_engine.py ├── test_events.py ├── test_export_caffe2.py ├── test_export_torchscript.py ├── test_model_analysis.py ├── test_model_zoo.py ├── test_packaging.py ├── test_registry.py ├── test_scheduler.py ├── test_solver.py ├── test_visualizer.py └── tracking │ ├── __init__.py │ ├── test_bbox_iou_tracker.py │ ├── test_hungarian_tracker.py │ ├── test_iou_weighted_hungarian_bbox_iou_tracker.py │ └── test_vanilla_hungarian_bbox_iou_tracker.py └── tools ├── README.md ├── __init__.py ├── analyze_model.py ├── benchmark.py ├── convert-torchvision-to-d2.py ├── deploy ├── README.md ├── export_model.py └── torchscript_mask_rcnn.cpp ├── lazyconfig_train_net.py ├── lightning_train_net.py ├── plain_train_net.py ├── train_net.py ├── visualize_data.py └── visualize_json_results.py /.gitignore: -------------------------------------------------------------------------------- 1 | # output dir 2 | output 3 | instant_test_output 4 | inference_test_output 5 | 6 | 7 | *.png 8 | *.json 9 | *.diff 10 | *.jpg 11 | *.tar 12 | !/projects/DensePose/doc/images/*.jpg 13 | 14 | # compilation and distribution 15 | __pycache__ 16 | _ext 17 | *.pyc 18 | *.pyd 19 | *.so 20 | *.dll 21 | *.egg-info/ 22 | build/ 23 | dist/ 24 | wheels/ 25 | 26 | # 
pytorch/python/numpy formats 27 | *.pth 28 | *.pkl 29 | *.npy 30 | *.ts 31 | model_ts*.txt 32 | 33 | # ipython/jupyter notebooks 34 | *.ipynb 35 | **/.ipynb_checkpoints/ 36 | 37 | # Editor temporaries 38 | *.swn 39 | *.swo 40 | *.swp 41 | *~ 42 | 43 | # editor settings 44 | .idea 45 | .vscode 46 | _darcs 47 | 48 | # project dirs 49 | /detectron2/model_zoo/configs 50 | /datasets/* 51 | !/datasets/*.* 52 | /projects/*/datasets 53 | /models 54 | /snippet 55 | 56 | # .txt 57 | *.txt* 58 | 59 | *.zip 60 | *.npz 61 | 62 | events.* 63 | 64 | *.bin 65 | 66 | # ReferFormer 67 | external/ReferFormer/data/ 68 | 69 | last_checkpoint 70 | 71 | OVIS 72 | 73 | external/bytetrack_unitrack_bdd 74 | 75 | *.csv -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Requirements 4 | 5 | We run our code in the following environment: 6 | 7 | - CUDA 11.7 8 | - Python 3.9 9 | - PyTorch 2.1.0 10 | 11 | ## Setup 12 | 13 | First, clone the repository. 14 | 15 | ``` 16 | git clone https://github.com/FoundationVision/UniRef.git 17 | ``` 18 | 19 | Second, install detectron2. 20 | 21 | ``` 22 | pip3 install -e . --user 23 | ``` 24 | 25 | Third, install the necessary packages. 26 | 27 | ``` 28 | pip3 install -r requirements.txt --user 29 | pip3 install git+https://github.com/youtubevos/cocoapi.git#"egg=pycocotools&subdirectory=PythonAPI" --user 30 | # flash-attn 31 | python3 -m pip install ninja 32 | python3 -m pip install flash-attn==2.0.6 33 | ``` 34 | 35 | Finally, compile the deformable attention CUDA operator. 36 | 37 | ``` 38 | cd projects/UniRef/uniref/models/deformable_detr/ops; 39 | bash make.sh 40 | ``` 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 FoundationVision 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
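As a quick sanity check after following the INSTALL.md steps above, the imports below should all succeed. This is a minimal sketch, not a file shipped with the repository; the extension-module name `MultiScaleDeformableAttention` is an assumption based on the Deformable-DETR `ops/setup.py` convention used under `projects/UniRef/uniref/models/deformable_detr/ops`.

```python
# Hypothetical post-install check (not part of this repo).
import torch

import detectron2    # installed by `pip3 install -e . --user`
import pycocotools   # youtubevos/cocoapi fork
import flash_attn    # flash-attn==2.0.6

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("detectron2", detectron2.__version__)

# Importable only after `bash make.sh` has compiled the deformable-attention op.
try:
    import MultiScaleDeformableAttention  # name assumed from Deformable-DETR's setup.py
    print("deformable attention op: OK")
except ImportError as err:
    print("deformable attention op not built:", err)
```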
22 | -------------------------------------------------------------------------------- /assets/RIS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/RIS.png -------------------------------------------------------------------------------- /assets/Ref-vos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/Ref-vos.png -------------------------------------------------------------------------------- /assets/VOS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/VOS.png -------------------------------------------------------------------------------- /assets/network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/network.png -------------------------------------------------------------------------------- /assets/results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/results.png -------------------------------------------------------------------------------- /assets/zero-few-shot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/assets/zero-few-shot.png -------------------------------------------------------------------------------- /configs/Base-RCNN-C4.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RPN: 4 | PRE_NMS_TOPK_TEST: 6000 5 | POST_NMS_TOPK_TEST: 1000 6 | ROI_HEADS: 7 | NAME: "Res5ROIHeads" 8 | DATASETS: 9 | TRAIN: ("coco_2017_train",) 10 | TEST: ("coco_2017_val",) 11 | SOLVER: 12 | IMS_PER_BATCH: 16 13 | BASE_LR: 0.02 14 | STEPS: (60000, 80000) 15 | MAX_ITER: 90000 16 | INPUT: 17 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 18 | VERSION: 2 19 | -------------------------------------------------------------------------------- /configs/Base-RCNN-DilatedC5.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RESNETS: 4 | OUT_FEATURES: ["res5"] 5 | RES5_DILATION: 2 6 | RPN: 7 | IN_FEATURES: ["res5"] 8 | PRE_NMS_TOPK_TEST: 6000 9 | POST_NMS_TOPK_TEST: 1000 10 | ROI_HEADS: 11 | NAME: "StandardROIHeads" 12 | IN_FEATURES: ["res5"] 13 | ROI_BOX_HEAD: 14 | NAME: "FastRCNNConvFCHead" 15 | NUM_FC: 2 16 | POOLER_RESOLUTION: 7 17 | ROI_MASK_HEAD: 18 | NAME: "MaskRCNNConvUpsampleHead" 19 | NUM_CONV: 4 20 | POOLER_RESOLUTION: 14 21 | DATASETS: 22 | TRAIN: ("coco_2017_train",) 23 | TEST: ("coco_2017_val",) 24 | SOLVER: 25 | IMS_PER_BATCH: 16 26 | BASE_LR: 0.02 27 | STEPS: (60000, 80000) 28 | MAX_ITER: 90000 29 | INPUT: 30 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 31 | VERSION: 2 32 | -------------------------------------------------------------------------------- /configs/Base-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | 
MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 7 | FPN: 8 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 9 | ANCHOR_GENERATOR: 10 | SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map 11 | ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) 12 | RPN: 13 | IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] 14 | PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level 15 | PRE_NMS_TOPK_TEST: 1000 # Per FPN level 16 | # Detectron1 uses 2000 proposals per-batch, 17 | # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) 18 | # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 19 | POST_NMS_TOPK_TRAIN: 1000 20 | POST_NMS_TOPK_TEST: 1000 21 | ROI_HEADS: 22 | NAME: "StandardROIHeads" 23 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 24 | ROI_BOX_HEAD: 25 | NAME: "FastRCNNConvFCHead" 26 | NUM_FC: 2 27 | POOLER_RESOLUTION: 7 28 | ROI_MASK_HEAD: 29 | NAME: "MaskRCNNConvUpsampleHead" 30 | NUM_CONV: 4 31 | POOLER_RESOLUTION: 14 32 | DATASETS: 33 | TRAIN: ("coco_2017_train",) 34 | TEST: ("coco_2017_val",) 35 | SOLVER: 36 | IMS_PER_BATCH: 16 37 | BASE_LR: 0.02 38 | STEPS: (60000, 80000) 39 | MAX_ITER: 90000 40 | INPUT: 41 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 42 | VERSION: 2 43 | -------------------------------------------------------------------------------- /configs/Base-RetinaNet.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "RetinaNet" 3 | BACKBONE: 4 | NAME: "build_retinanet_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res3", "res4", "res5"] 7 | ANCHOR_GENERATOR: 8 | SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] 9 | FPN: 10 | IN_FEATURES: ["res3", "res4", "res5"] 11 | RETINANET: 12 | IOU_THRESHOLDS: [0.4, 0.5] 13 | IOU_LABELS: [0, -1, 1] 14 | SMOOTH_L1_LOSS_BETA: 0.0 15 | DATASETS: 16 | TRAIN: ("coco_2017_train",) 17 | TEST: ("coco_2017_val",) 18 | SOLVER: 19 | IMS_PER_BATCH: 16 20 | BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate 21 | STEPS: (60000, 80000) 22 | MAX_ITER: 90000 23 | INPUT: 24 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 25 | VERSION: 2 26 | -------------------------------------------------------------------------------- /configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | LOAD_PROPOSALS: True 6 | RESNETS: 7 | DEPTH: 50 8 | PROPOSAL_GENERATOR: 9 | NAME: "PrecomputedProposals" 10 | DATASETS: 11 | TRAIN: ("coco_2017_train",) 12 | PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", ) 13 | TEST: ("coco_2017_val",) 14 | PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 15 | DATALOADER: 16 | # proposals are part of the dataset_dicts, and take a lot of RAM 17 | NUM_WORKERS: 2 18 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | 
-------------------------------------------------------------------------------- /configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: False 4 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 5 | PIXEL_STD: [57.375, 57.120, 58.395] 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | SOLVER: 12 | STEPS: (210000, 250000) 13 | MAX_ITER: 270000 14 | -------------------------------------------------------------------------------- /configs/COCO-Detection/fcos_R_50_FPN_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco import dataloader 4 | from ..common.models.fcos import model 5 | from ..common.train import train 6 | 7 | dataloader.train.mapper.use_instance_mask = False 8 | optimizer.lr = 0.01 9 | 10 | model.backbone.bottom_up.freeze_at = 2 11 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 12 | -------------------------------------------------------------------------------- /configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /configs/COCO-Detection/retinanet_R_50_FPN_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco import dataloader 4 | from ..common.models.retinanet import model 5 | from ..common.train import train 6 | 7 | dataloader.train.mapper.use_instance_mask = False 8 | model.backbone.bottom_up.freeze_at = 2 9 | optimizer.lr = 0.01 10 | 11 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 12 | -------------------------------------------------------------------------------- /configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RetinaNet.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /configs/COCO-Detection/rpn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "ProposalNetwork" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: False 6 | RESNETS: 7 | DEPTH: 50 8 | RPN: 9 | PRE_NMS_TOPK_TEST: 12000 10 
| POST_NMS_TOPK_TEST: 2000 11 | -------------------------------------------------------------------------------- /configs/COCO-Detection/rpn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "ProposalNetwork" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: False 6 | RESNETS: 7 | DEPTH: 50 8 | RPN: 9 | POST_NMS_TOPK_TEST: 2000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.train import train 2 | from ..common.optim import SGD as optimizer 3 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 4 | from ..common.data.coco import dataloader 5 | from ..common.models.mask_rcnn_c4 import model 6 | 7 | model.backbone.freeze_at = 2 8 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 9 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-DilatedC5.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco import dataloader 4 | from ..common.models.mask_rcnn_fpn import model 5 | from ..common.train import train 6 | 7 | model.backbone.bottom_up.freeze_at = 2 8 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 9 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | RPN: 8 | BBOX_REG_LOSS_TYPE: "giou" 9 | BBOX_REG_LOSS_WEIGHT: 2.0 10 | ROI_BOX_HEAD: 11 | BBOX_REG_LOSS_TYPE: "giou" 12 | BBOX_REG_LOSS_WEIGHT: 10.0 13 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | SOLVER: 8 | STEPS: (210000, 250000) 9 | MAX_ITER: 270000 10 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 5 | PIXEL_STD: [57.375, 57.120, 58.395] 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | SOLVER: 12 | STEPS: (210000, 250000) 13 | MAX_ITER: 270000 14 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco import dataloader 4 | from ..common.models.mask_rcnn_fpn 
import model 5 | from ..common.train import train 6 | 7 | from detectron2.config import LazyCall as L 8 | from detectron2.modeling.backbone import RegNet 9 | from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock 10 | 11 | 12 | # Replace default ResNet with RegNetX-4GF from the DDS paper. Config source: 13 | # https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa 14 | model.backbone.bottom_up = L(RegNet)( 15 | stem_class=SimpleStem, 16 | stem_width=32, 17 | block_class=ResBottleneckBlock, 18 | depth=23, 19 | w_a=38.65, 20 | w_0=96, 21 | w_m=2.43, 22 | group_width=40, 23 | freeze_at=2, 24 | norm="FrozenBN", 25 | out_features=["s1", "s2", "s3", "s4"], 26 | ) 27 | model.pixel_std = [57.375, 57.120, 58.395] 28 | 29 | optimizer.weight_decay = 5e-5 30 | train.init_checkpoint = ( 31 | "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth" 32 | ) 33 | # RegNets benefit from enabling cudnn benchmark mode 34 | train.cudnn_benchmark = True 35 | -------------------------------------------------------------------------------- /configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco import dataloader 4 | from ..common.models.mask_rcnn_fpn import model 5 | from ..common.train import train 6 | 7 | from detectron2.config import LazyCall as L 8 | from detectron2.modeling.backbone import RegNet 9 | from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock 10 | 11 | 12 | # Replace default ResNet with RegNetY-4GF from the DDS paper. Config source: 13 | # https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa 14 | model.backbone.bottom_up = L(RegNet)( 15 | stem_class=SimpleStem, 16 | stem_width=32, 17 | block_class=ResBottleneckBlock, 18 | depth=22, 19 | w_a=31.41, 20 | w_0=96, 21 | w_m=2.24, 22 | group_width=64, 23 | se_ratio=0.25, 24 | freeze_at=2, 25 | norm="FrozenBN", 26 | out_features=["s1", "s2", "s3", "s4"], 27 | ) 28 | model.pixel_std = [57.375, 57.120, 58.395] 29 | 30 | optimizer.weight_decay = 5e-5 31 | train.init_checkpoint = ( 32 | "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth" 33 | ) 34 | # RegNets benefit from enabling cudnn benchmark mode 35 | train.cudnn_benchmark = True 36 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | KEYPOINT_ON: True 4 | ROI_HEADS: 5 | NUM_CLASSES: 1 6 | ROI_BOX_HEAD: 7 | SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss 8 | RPN: 9 | # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. 10 | # 1000 proposals per-image is found to hurt box AP. 11 | # Therefore we increase it to 1500 per-image. 
12 | POST_NMS_TOPK_TRAIN: 1500 13 | DATASETS: 14 | TRAIN: ("keypoints_coco_2017_train",) 15 | TEST: ("keypoints_coco_2017_val",) 16 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco_keypoint import dataloader 4 | from ..common.models.keypoint_rcnn_fpn import model 5 | from ..common.train import train 6 | 7 | model.backbone.bottom_up.freeze_at = 2 8 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 9 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Keypoint-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | RESNETS: 6 | STRIDE_IN_1X1: False # this is a C2 model 7 | NUM_GROUPS: 32 8 | WIDTH_PER_GROUP: 8 9 | DEPTH: 101 10 | SOLVER: 11 | STEPS: (210000, 250000) 12 | MAX_ITER: 270000 13 | -------------------------------------------------------------------------------- /configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | MASK_ON: True 5 | SEM_SEG_HEAD: 6 | LOSS_WEIGHT: 0.5 7 | DATASETS: 8 | TRAIN: ("coco_2017_train_panoptic_separated",) 9 | TEST: ("coco_2017_val_panoptic_separated",) 10 | DATALOADER: 11 | FILTER_EMPTY_ANNOTATIONS: False 12 | -------------------------------------------------------------------------------- /configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | RESNETS: 5 | DEPTH: 101 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | 
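The `.py` configs in this tree (such as `panoptic_fpn_R_50_1x.py`, next below) are detectron2 "lazy configs": plain Python modules that build a `LazyCall` tree instead of yacs keys. A minimal loading sketch using detectron2's `LazyConfig` API follows; the checkpoint assignment merely repeats the value the config file already sets, and attribute-style overrides are assumed to mirror the statements inside the config.

```python
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py")
# Fields are attribute-assignable, mirroring the assignments inside the config file.
cfg.train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
cfg.model.backbone.bottom_up.freeze_at = 2
model = instantiate(cfg.model)  # recursively builds the nn.Module from the LazyCall tree
```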
-------------------------------------------------------------------------------- /configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py: -------------------------------------------------------------------------------- 1 | from ..common.optim import SGD as optimizer 2 | from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier 3 | from ..common.data.coco_panoptic_separated import dataloader 4 | from ..common.models.panoptic_fpn import model 5 | from ..common.train import train 6 | 7 | model.backbone.bottom_up.freeze_at = 2 8 | train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 9 | -------------------------------------------------------------------------------- /configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | -------------------------------------------------------------------------------- /configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Panoptic-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | RESNETS: 5 | DEPTH: 50 6 | SOLVER: 7 | STEPS: (210000, 250000) 8 | MAX_ITER: 270000 9 | -------------------------------------------------------------------------------- /configs/Cityscapes/mask_rcnn_R_50_FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | # For better, more stable performance initialize from COCO 5 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" 6 | MASK_ON: True 7 | ROI_HEADS: 8 | NUM_CLASSES: 8 9 | # This is similar to the setting used in Mask R-CNN paper, Appendix A 10 | # But there are some differences, e.g., we did not initialize the output 11 | # layer using the corresponding classes from COCO 12 | INPUT: 13 | MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) 14 | MIN_SIZE_TRAIN_SAMPLING: "choice" 15 | MIN_SIZE_TEST: 1024 16 | MAX_SIZE_TRAIN: 2048 17 | MAX_SIZE_TEST: 2048 18 | DATASETS: 19 | TRAIN: ("cityscapes_fine_instance_seg_train",) 20 | TEST: ("cityscapes_fine_instance_seg_val",) 21 | SOLVER: 22 | BASE_LR: 0.01 23 | STEPS: (18000,) 24 | MAX_ITER: 24000 25 | IMS_PER_BATCH: 8 26 | TEST: 27 | EVAL_PERIOD: 8000 28 | -------------------------------------------------------------------------------- /configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | # Detectron1 uses smooth L1 loss with some magic beta values. 8 | # The defaults are changed to L1 loss in Detectron2. 
9 | RPN: 10 | SMOOTH_L1_BETA: 0.1111 11 | ROI_BOX_HEAD: 12 | SMOOTH_L1_BETA: 1.0 13 | POOLER_SAMPLING_RATIO: 2 14 | POOLER_TYPE: "ROIAlign" 15 | INPUT: 16 | # no scale augmentation 17 | MIN_SIZE_TRAIN: (800, ) 18 | -------------------------------------------------------------------------------- /configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1 9 | ROI_KEYPOINT_HEAD: 10 | POOLER_RESOLUTION: 14 11 | POOLER_SAMPLING_RATIO: 2 12 | POOLER_TYPE: "ROIAlign" 13 | # Detectron1 uses smooth L1 loss with some magic beta values. 14 | # The defaults are changed to L1 loss in Detectron2. 15 | ROI_BOX_HEAD: 16 | SMOOTH_L1_BETA: 1.0 17 | POOLER_SAMPLING_RATIO: 2 18 | POOLER_TYPE: "ROIAlign" 19 | RPN: 20 | SMOOTH_L1_BETA: 0.1111 21 | # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2 22 | # 1000 proposals per-image is found to hurt box AP. 23 | # Therefore we increase it to 1500 per-image. 24 | POST_NMS_TOPK_TRAIN: 1500 25 | DATASETS: 26 | TRAIN: ("keypoints_coco_2017_train",) 27 | TEST: ("keypoints_coco_2017_val",) 28 | -------------------------------------------------------------------------------- /configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | # Detectron1 uses smooth L1 loss with some magic beta values. 8 | # The defaults are changed to L1 loss in Detectron2. 
9 | RPN: 10 | SMOOTH_L1_BETA: 0.1111 11 | ROI_BOX_HEAD: 12 | SMOOTH_L1_BETA: 1.0 13 | POOLER_SAMPLING_RATIO: 2 14 | POOLER_TYPE: "ROIAlign" 15 | ROI_MASK_HEAD: 16 | POOLER_SAMPLING_RATIO: 2 17 | POOLER_TYPE: "ROIAlign" 18 | INPUT: 19 | # no scale augmentation 20 | MIN_SIZE_TRAIN: (800, ) 21 | -------------------------------------------------------------------------------- /configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | ROI_HEADS: 8 | NUM_CLASSES: 1230 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v0.5_train",) 14 | TEST: ("lvis_v0.5_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | DATALOADER: 18 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 19 | REPEAT_THRESHOLD: 0.001 20 | -------------------------------------------------------------------------------- /configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1230 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v0.5_train",) 14 | TEST: ("lvis_v0.5_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | DATALOADER: 18 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 19 | REPEAT_THRESHOLD: 0.001 20 | -------------------------------------------------------------------------------- /configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | ROI_HEADS: 12 | NUM_CLASSES: 1230 13 | SCORE_THRESH_TEST: 0.0001 14 | INPUT: 15 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 16 | DATASETS: 17 | TRAIN: ("lvis_v0.5_train",) 18 | TEST: ("lvis_v0.5_val",) 19 | TEST: 20 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 21 | DATALOADER: 22 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 23 | REPEAT_THRESHOLD: 0.001 24 | -------------------------------------------------------------------------------- /configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 101 7 | ROI_HEADS: 8 | NUM_CLASSES: 1203 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v1_train",) 14 | TEST: ("lvis_v1_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | SOLVER: 18 | STEPS: (120000, 160000) 19 | MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs 20 | DATALOADER: 21 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 22 | 
REPEAT_THRESHOLD: 0.001 23 | -------------------------------------------------------------------------------- /configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 1203 9 | SCORE_THRESH_TEST: 0.0001 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | DATASETS: 13 | TRAIN: ("lvis_v1_train",) 14 | TEST: ("lvis_v1_val",) 15 | TEST: 16 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 17 | SOLVER: 18 | STEPS: (120000, 160000) 19 | MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs 20 | DATALOADER: 21 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 22 | REPEAT_THRESHOLD: 0.001 23 | -------------------------------------------------------------------------------- /configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" 4 | PIXEL_STD: [57.375, 57.120, 58.395] 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False # this is a C2 model 8 | NUM_GROUPS: 32 9 | WIDTH_PER_GROUP: 8 10 | DEPTH: 101 11 | ROI_HEADS: 12 | NUM_CLASSES: 1203 13 | SCORE_THRESH_TEST: 0.0001 14 | INPUT: 15 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 16 | DATASETS: 17 | TRAIN: ("lvis_v1_train",) 18 | TEST: ("lvis_v1_val",) 19 | SOLVER: 20 | STEPS: (120000, 160000) 21 | MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs 22 | TEST: 23 | DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 24 | DATALOADER: 25 | SAMPLER_TRAIN: "RepeatFactorTrainingSampler" 26 | REPEAT_THRESHOLD: 0.001 27 | -------------------------------------------------------------------------------- /configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NAME: CascadeROIHeads 9 | ROI_BOX_HEAD: 10 | CLS_AGNOSTIC_BBOX_REG: True 11 | RPN: 12 | POST_NMS_TOPK_TRAIN: 2000 13 | -------------------------------------------------------------------------------- /configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NAME: CascadeROIHeads 9 | ROI_BOX_HEAD: 10 | CLS_AGNOSTIC_BBOX_REG: True 11 | RPN: 12 | POST_NMS_TOPK_TRAIN: 2000 13 | SOLVER: 14 | STEPS: (210000, 250000) 15 | MAX_ITER: 270000 16 | -------------------------------------------------------------------------------- /configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | MASK_ON: True 4 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" 5 | RESNETS: 6 | STRIDE_IN_1X1: False # this is a C2 model 7 | NUM_GROUPS: 32 8 | WIDTH_PER_GROUP: 8 9 | DEPTH: 152 10 | DEFORM_ON_PER_STAGE: [False, True, True, True] 11 | ROI_HEADS: 12 | NAME: "CascadeROIHeads" 13 | ROI_BOX_HEAD: 14 | 
NAME: "FastRCNNConvFCHead" 15 | NUM_CONV: 4 16 | NUM_FC: 1 17 | NORM: "GN" 18 | CLS_AGNOSTIC_BBOX_REG: True 19 | ROI_MASK_HEAD: 20 | NUM_CONV: 8 21 | NORM: "GN" 22 | RPN: 23 | POST_NMS_TOPK_TRAIN: 2000 24 | SOLVER: 25 | IMS_PER_BATCH: 128 26 | STEPS: (35000, 45000) 27 | MAX_ITER: 50000 28 | BASE_LR: 0.16 29 | INPUT: 30 | MIN_SIZE_TRAIN: (640, 864) 31 | MIN_SIZE_TRAIN_SAMPLING: "range" 32 | MAX_SIZE_TRAIN: 1440 33 | CROP: 34 | ENABLED: True 35 | TEST: 36 | EVAL_PERIOD: 2500 37 | -------------------------------------------------------------------------------- /configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_BOX_HEAD: 8 | CLS_AGNOSTIC_BBOX_REG: True 9 | ROI_MASK_HEAD: 10 | CLS_AGNOSTIC_MASK: True 11 | -------------------------------------------------------------------------------- /configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 8 | DEFORM_MODULATED: False 9 | -------------------------------------------------------------------------------- /configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 8 | DEFORM_MODULATED: False 9 | SOLVER: 10 | STEPS: (210000, 250000) 11 | MAX_ITER: 270000 12 | -------------------------------------------------------------------------------- /configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | NORM: "GN" 8 | STRIDE_IN_1X1: False 9 | FPN: 10 | NORM: "GN" 11 | ROI_BOX_HEAD: 12 | NAME: "FastRCNNConvFCHead" 13 | NUM_CONV: 4 14 | NUM_FC: 1 15 | NORM: "GN" 16 | ROI_MASK_HEAD: 17 | NORM: "GN" 18 | SOLVER: 19 | # 3x schedule 20 | STEPS: (210000, 250000) 21 | MAX_ITER: 270000 22 | -------------------------------------------------------------------------------- /configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | NORM: "SyncBN" 8 | STRIDE_IN_1X1: True 9 | FPN: 10 | NORM: "SyncBN" 11 | ROI_BOX_HEAD: 12 | NAME: "FastRCNNConvFCHead" 13 | NUM_CONV: 4 14 | NUM_FC: 1 15 | NORM: "SyncBN" 16 | ROI_MASK_HEAD: 17 | NORM: "SyncBN" 18 | SOLVER: 19 | # 3x schedule 20 | STEPS: (210000, 250000) 21 | MAX_ITER: 270000 22 | TEST: 23 | PRECISE_BN: 24 | ENABLED: True 25 | -------------------------------------------------------------------------------- /configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml: -------------------------------------------------------------------------------- 1 | # A 
large PanopticFPN for demo purposes. 2 | # Use GN on backbone to support semantic seg. 3 | # Use Cascade + Deform Conv to improve localization. 4 | _BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml" 5 | MODEL: 6 | WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN" 7 | RESNETS: 8 | DEPTH: 101 9 | NORM: "GN" 10 | DEFORM_ON_PER_STAGE: [False, True, True, True] 11 | STRIDE_IN_1X1: False 12 | FPN: 13 | NORM: "GN" 14 | ROI_HEADS: 15 | NAME: CascadeROIHeads 16 | ROI_BOX_HEAD: 17 | CLS_AGNOSTIC_BBOX_REG: True 18 | ROI_MASK_HEAD: 19 | NORM: "GN" 20 | RPN: 21 | POST_NMS_TOPK_TRAIN: 2000 22 | SOLVER: 23 | STEPS: (105000, 125000) 24 | MAX_ITER: 135000 25 | IMS_PER_BATCH: 32 26 | BASE_LR: 0.04 27 | -------------------------------------------------------------------------------- /configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" 2 | MODEL: 3 | # Train from random initialization. 4 | WEIGHTS: "" 5 | # It makes sense to divide by STD when training from scratch 6 | # But it seems to make no difference on the results and C2's models didn't do this. 7 | # So we keep things consistent with C2. 8 | # PIXEL_STD: [57.375, 57.12, 58.395] 9 | MASK_ON: True 10 | BACKBONE: 11 | FREEZE_AT: 0 12 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 13 | # to learn what you need for training from scratch. 14 | -------------------------------------------------------------------------------- /configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" 2 | MODEL: 3 | PIXEL_STD: [57.375, 57.12, 58.395] 4 | WEIGHTS: "" 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False 8 | BACKBONE: 9 | FREEZE_AT: 0 10 | SOLVER: 11 | # 9x schedule 12 | IMS_PER_BATCH: 64 # 4x the standard 13 | STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k 14 | MAX_ITER: 202500 # 90k * 9 / 4 15 | BASE_LR: 0.08 16 | TEST: 17 | EVAL_PERIOD: 2500 18 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 19 | # to learn what you need for training from scratch. 20 | -------------------------------------------------------------------------------- /configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "mask_rcnn_R_50_FPN_3x_syncbn.yaml" 2 | MODEL: 3 | PIXEL_STD: [57.375, 57.12, 58.395] 4 | WEIGHTS: "" 5 | MASK_ON: True 6 | RESNETS: 7 | STRIDE_IN_1X1: False 8 | BACKBONE: 9 | FREEZE_AT: 0 10 | SOLVER: 11 | # 9x schedule 12 | IMS_PER_BATCH: 64 # 4x the standard 13 | STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k 14 | MAX_ITER: 202500 # 90k * 9 / 4 15 | BASE_LR: 0.08 16 | TEST: 17 | EVAL_PERIOD: 2500 18 | # NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 19 | # to learn what you need for training from scratch. 
20 | -------------------------------------------------------------------------------- /configs/Misc/semantic_R_50_FPN_1x.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_train_panoptic_stuffonly",) 9 | TEST: ("coco_2017_val_panoptic_stuffonly",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | -------------------------------------------------------------------------------- /configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 20 9 | INPUT: 10 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 11 | MIN_SIZE_TEST: 800 12 | DATASETS: 13 | TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') 14 | TEST: ('voc_2007_test',) 15 | SOLVER: 16 | STEPS: (12000, 16000) 17 | MAX_ITER: 18000 # 17.4 epochs 18 | WARMUP_ITERS: 100 19 | -------------------------------------------------------------------------------- /configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | NUM_CLASSES: 20 9 | INPUT: 10 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 11 | MIN_SIZE_TEST: 800 12 | DATASETS: 13 | TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') 14 | TEST: ('voc_2007_test',) 15 | SOLVER: 16 | STEPS: (12000, 16000) 17 | MAX_ITER: 18000 # 17.4 epochs 18 | WARMUP_ITERS: 100 19 | -------------------------------------------------------------------------------- /configs/common/README.md: -------------------------------------------------------------------------------- 1 | This directory provides definitions for a few common models, dataloaders, schedulers, 2 | and optimizers that are often used in training. 3 | The definitions of these objects are provided in the form of lazy instantiation: 4 | their arguments can be edited by users before constructing the objects. 5 | 6 | They can be imported, or loaded by the `model_zoo.get_config` API, in users' own configs. 7 | -------------------------------------------------------------------------------- /configs/common/coco_schedule.py: -------------------------------------------------------------------------------- 1 | from fvcore.common.param_scheduler import MultiStepParamScheduler 2 | 3 | from detectron2.config import LazyCall as L 4 | from detectron2.solver import WarmupParamScheduler 5 | 6 | 7 | def default_X_scheduler(num_X): 8 | """ 9 | Returns the config for a default multi-step LR scheduler such as "1x", "3x", 10 | commonly referred to in papers, where every 1x has the total length of 1440k 11 | training images (~12 COCO epochs). LR is decayed twice at the end of training 12 | following the strategy defined in "Rethinking ImageNet Pre-training", Sec 4. 
13 | 14 | Args: 15 | num_X: a positive real number 16 | 17 | Returns: 18 | DictConfig: configs that define the multiplier for LR during training 19 | """ 20 | # total number of iterations assuming 16 batch size, using 1440000/16=90000 21 | total_steps_16bs = num_X * 90000 22 | 23 | if num_X <= 2: 24 | scheduler = L(MultiStepParamScheduler)( 25 | values=[1.0, 0.1, 0.01], 26 | # note that scheduler is scale-invariant. This is equivalent to 27 | # milestones=[6, 8, 9] 28 | milestones=[60000, 80000, 90000], 29 | ) 30 | else: 31 | scheduler = L(MultiStepParamScheduler)( 32 | values=[1.0, 0.1, 0.01], 33 | milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs], 34 | ) 35 | return L(WarmupParamScheduler)( 36 | scheduler=scheduler, 37 | warmup_length=1000 / total_steps_16bs, 38 | warmup_method="linear", 39 | warmup_factor=0.001, 40 | ) 41 | 42 | 43 | lr_multiplier_1x = default_X_scheduler(1) 44 | lr_multiplier_2x = default_X_scheduler(2) 45 | lr_multiplier_3x = default_X_scheduler(3) 46 | lr_multiplier_6x = default_X_scheduler(6) 47 | lr_multiplier_9x = default_X_scheduler(9) 48 | -------------------------------------------------------------------------------- /configs/common/data/coco.py: -------------------------------------------------------------------------------- 1 | from omegaconf import OmegaConf 2 | 3 | import detectron2.data.transforms as T 4 | from detectron2.config import LazyCall as L 5 | from detectron2.data import ( 6 | DatasetMapper, 7 | build_detection_test_loader, 8 | build_detection_train_loader, 9 | get_detection_dataset_dicts, 10 | ) 11 | from detectron2.evaluation import COCOEvaluator 12 | 13 | dataloader = OmegaConf.create() 14 | 15 | dataloader.train = L(build_detection_train_loader)( 16 | dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"), 17 | mapper=L(DatasetMapper)( 18 | is_train=True, 19 | augmentations=[ 20 | L(T.ResizeShortestEdge)( 21 | short_edge_length=(640, 672, 704, 736, 768, 800), 22 | sample_style="choice", 23 | max_size=1333, 24 | ), 25 | L(T.RandomFlip)(horizontal=True), 26 | ], 27 | image_format="BGR", 28 | use_instance_mask=True, 29 | ), 30 | total_batch_size=16, 31 | num_workers=4, 32 | ) 33 | 34 | dataloader.test = L(build_detection_test_loader)( 35 | dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False), 36 | mapper=L(DatasetMapper)( 37 | is_train=False, 38 | augmentations=[ 39 | L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333), 40 | ], 41 | image_format="${...train.mapper.image_format}", 42 | ), 43 | num_workers=4, 44 | ) 45 | 46 | dataloader.evaluator = L(COCOEvaluator)( 47 | dataset_name="${..test.dataset.names}", 48 | ) 49 | -------------------------------------------------------------------------------- /configs/common/data/coco_keypoint.py: -------------------------------------------------------------------------------- 1 | from detectron2.data.detection_utils import create_keypoint_hflip_indices 2 | 3 | from .coco import dataloader 4 | 5 | dataloader.train.dataset.min_keypoints = 1 6 | dataloader.train.dataset.names = "keypoints_coco_2017_train" 7 | dataloader.test.dataset.names = "keypoints_coco_2017_val" 8 | 9 | dataloader.train.mapper.update( 10 | use_instance_mask=False, 11 | use_keypoint=True, 12 | keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names), 13 | ) 14 | -------------------------------------------------------------------------------- /configs/common/data/coco_panoptic_separated.py: 
-------------------------------------------------------------------------------- 1 | from detectron2.config import LazyCall as L 2 | from detectron2.evaluation import ( 3 | COCOEvaluator, 4 | COCOPanopticEvaluator, 5 | DatasetEvaluators, 6 | SemSegEvaluator, 7 | ) 8 | 9 | from .coco import dataloader 10 | 11 | dataloader.train.dataset.names = "coco_2017_train_panoptic_separated" 12 | dataloader.train.dataset.filter_empty = False 13 | dataloader.test.dataset.names = "coco_2017_val_panoptic_separated" 14 | 15 | 16 | dataloader.evaluator = [ 17 | L(COCOEvaluator)( 18 | dataset_name="${...test.dataset.names}", 19 | ), 20 | L(SemSegEvaluator)( 21 | dataset_name="${...test.dataset.names}", 22 | ), 23 | L(COCOPanopticEvaluator)( 24 | dataset_name="${...test.dataset.names}", 25 | ), 26 | ] 27 | -------------------------------------------------------------------------------- /configs/common/models/cascade_rcnn.py: -------------------------------------------------------------------------------- 1 | from detectron2.config import LazyCall as L 2 | from detectron2.layers import ShapeSpec 3 | from detectron2.modeling.box_regression import Box2BoxTransform 4 | from detectron2.modeling.matcher import Matcher 5 | from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads 6 | 7 | from .mask_rcnn_fpn import model 8 | 9 | # arguments that don't exist for Cascade R-CNN 10 | [model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] 11 | 12 | model.roi_heads.update( 13 | _target_=CascadeROIHeads, 14 | box_heads=[ 15 | L(FastRCNNConvFCHead)( 16 | input_shape=ShapeSpec(channels=256, height=7, width=7), 17 | conv_dims=[], 18 | fc_dims=[1024, 1024], 19 | ) 20 | for k in range(3) 21 | ], 22 | box_predictors=[ 23 | L(FastRCNNOutputLayers)( 24 | input_shape=ShapeSpec(channels=1024), 25 | test_score_thresh=0.05, 26 | box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), 27 | cls_agnostic_bbox_reg=True, 28 | num_classes="${...num_classes}", 29 | ) 30 | for (w1, w2) in [(10, 5), (20, 10), (30, 15)] 31 | ], 32 | proposal_matchers=[ 33 | L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) 34 | for th in [0.5, 0.6, 0.7] 35 | ], 36 | ) 37 | -------------------------------------------------------------------------------- /configs/common/models/fcos.py: -------------------------------------------------------------------------------- 1 | from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead 2 | 3 | from .retinanet import model 4 | 5 | model._target_ = FCOS 6 | 7 | del model.anchor_generator 8 | del model.box2box_transform 9 | del model.anchor_matcher 10 | del model.input_format 11 | 12 | # Use P5 instead of C5 to compute P6/P7 13 | # (Sec 2.2 of https://arxiv.org/abs/2006.09214) 14 | model.backbone.top_block.in_feature = "p5" 15 | model.backbone.top_block.in_channels = 256 16 | 17 | # New score threshold determined based on sqrt(cls_score * centerness) 18 | model.test_score_thresh = 0.2 19 | model.test_nms_thresh = 0.6 20 | 21 | model.head._target_ = FCOSHead 22 | del model.head.num_anchors 23 | model.head.norm = "GN" 24 | -------------------------------------------------------------------------------- /configs/common/models/keypoint_rcnn_fpn.py: -------------------------------------------------------------------------------- 1 | from detectron2.config import LazyCall as L 2 | from detectron2.layers import ShapeSpec 3 | from detectron2.modeling.poolers import ROIPooler 4 | from detectron2.modeling.roi_heads import 
KRCNNConvDeconvUpsampleHead 5 | 6 | from .mask_rcnn_fpn import model 7 | 8 | [model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]] 9 | 10 | model.roi_heads.update( 11 | num_classes=1, 12 | keypoint_in_features=["p2", "p3", "p4", "p5"], 13 | keypoint_pooler=L(ROIPooler)( 14 | output_size=14, 15 | scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 16 | sampling_ratio=0, 17 | pooler_type="ROIAlignV2", 18 | ), 19 | keypoint_head=L(KRCNNConvDeconvUpsampleHead)( 20 | input_shape=ShapeSpec(channels=256, width=14, height=14), 21 | num_keypoints=17, 22 | conv_dims=[512] * 8, 23 | loss_normalizer="visible", 24 | ), 25 | ) 26 | 27 | # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. 28 | # 1000 proposals per-image is found to hurt box AP. 29 | # Therefore we increase it to 1500 per-image. 30 | model.proposal_generator.post_nms_topk = (1500, 1000) 31 | 32 | # Keypoint AP degrades (though box AP improves) when using plain L1 loss 33 | model.roi_heads.box_predictor.smooth_l1_beta = 0.5 34 | -------------------------------------------------------------------------------- /configs/common/models/panoptic_fpn.py: -------------------------------------------------------------------------------- 1 | from detectron2.config import LazyCall as L 2 | from detectron2.layers import ShapeSpec 3 | from detectron2.modeling import PanopticFPN 4 | from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead 5 | 6 | from .mask_rcnn_fpn import model 7 | 8 | model._target_ = PanopticFPN 9 | model.sem_seg_head = L(SemSegFPNHead)( 10 | input_shape={ 11 | f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}") 12 | for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32]) 13 | }, 14 | ignore_value=255, 15 | num_classes=54, # COCO stuff + 1 16 | conv_dims=128, 17 | common_stride=4, 18 | loss_weight=0.5, 19 | norm="GN", 20 | ) 21 | -------------------------------------------------------------------------------- /configs/common/optim.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from detectron2.config import LazyCall as L 4 | from detectron2.solver.build import get_default_optimizer_params 5 | 6 | SGD = L(torch.optim.SGD)( 7 | params=L(get_default_optimizer_params)( 8 | # params.model is meant to be set to the model object, before instantiating 9 | # the optimizer. 10 | weight_decay_norm=0.0 11 | ), 12 | lr=0.02, 13 | momentum=0.9, 14 | weight_decay=1e-4, 15 | ) 16 | -------------------------------------------------------------------------------- /configs/common/train.py: -------------------------------------------------------------------------------- 1 | # Common training-related configs that are designed for "tools/lazyconfig_train_net.py" 2 | # You can use your own instead, together with your own train_net.py 3 | train = dict( 4 | output_dir="./output", 5 | init_checkpoint="", 6 | max_iter=90000, 7 | amp=dict(enabled=False), # options for Automatic Mixed Precision 8 | ddp=dict( # options for DistributedDataParallel 9 | broadcast_buffers=False, 10 | find_unused_parameters=False, 11 | fp16_compression=False, 12 | ), 13 | checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer 14 | eval_period=5000, 15 | log_period=20, 16 | device="cuda" 17 | # ... 
18 | ) 19 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | model.backbone.bottom_up.stages.depth = 101 10 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_101_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 2 # 100ep -> 200ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 2 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_101_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 4 # 100ep -> 400ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 4 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 2 # 100ep -> 200ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 2 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 4 # 100ep -> 400ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 4 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter //= 2 # 100ep -> 50ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone // 2 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from 
.mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | from detectron2.config import LazyCall as L 9 | from detectron2.modeling.backbone import RegNet 10 | from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock 11 | 12 | # Config source: 13 | # https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa 14 | model.backbone.bottom_up = L(RegNet)( 15 | stem_class=SimpleStem, 16 | stem_width=32, 17 | block_class=ResBottleneckBlock, 18 | depth=23, 19 | w_a=38.65, 20 | w_0=96, 21 | w_m=2.43, 22 | group_width=40, 23 | norm="SyncBN", 24 | out_features=["s1", "s2", "s3", "s4"], 25 | ) 26 | model.pixel_std = [57.375, 57.120, 58.395] 27 | 28 | # RegNets benefit from enabling cudnn benchmark mode 29 | train.cudnn_benchmark = True 30 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 2 # 100ep -> 200ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 2 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 4 # 100ep -> 400ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 4 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_R_50_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | from detectron2.config import LazyCall as L 9 | from detectron2.modeling.backbone import RegNet 10 | from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock 11 | 12 | # Config source: 13 | # https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa 14 | model.backbone.bottom_up = L(RegNet)( 15 | stem_class=SimpleStem, 16 | stem_width=32, 17 | block_class=ResBottleneckBlock, 18 | depth=22, 19 | w_a=31.41, 20 | w_0=96, 21 | w_m=2.24, 22 | group_width=64, 23 | se_ratio=0.25, 24 | norm="SyncBN", 25 | out_features=["s1", "s2", "s3", "s4"], 26 | ) 27 | model.pixel_std = [57.375, 57.120, 58.395] 28 | 29 | # RegNets benefit from enabling cudnn benchmark mode 30 | train.cudnn_benchmark = True 31 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import ( 2 | 
dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 2 # 100ep -> 200ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 2 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import ( 2 | dataloader, 3 | lr_multiplier, 4 | model, 5 | optimizer, 6 | train, 7 | ) 8 | 9 | train.max_iter *= 4 # 100ep -> 400ep 10 | 11 | lr_multiplier.scheduler.milestones = [ 12 | milestone * 4 for milestone in lr_multiplier.scheduler.milestones 13 | ] 14 | lr_multiplier.scheduler.num_updates = train.max_iter 15 | -------------------------------------------------------------------------------- /configs/quick_schedules/README.md: -------------------------------------------------------------------------------- 1 | These are quick configs for performance or accuracy regression tracking purposes. 2 | 3 | * `*instant_test.yaml`: can train on 2 GPUs. They are used to test whether the training can 4 | successfully finish. They are not expected to produce reasonable training results. 5 | * `*inference_acc_test.yaml`: They should be run using `--eval-only`. They run inference using pre-trained models and verify 6 | the results are as expected. 7 | * `*training_acc_test.yaml`: They should be trained on 8 GPUs. They finish in about an hour and verify the training accuracy 8 | is within the normal range. 9 | -------------------------------------------------------------------------------- /configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" 2 | DATASETS: 3 | TRAIN: ("coco_2017_val_100",) 4 | TEST: ("coco_2017_val_100",) 5 | SOLVER: 6 | BASE_LR: 0.005 7 | STEPS: (30,) 8 | MAX_ITER: 40 9 | IMS_PER_BATCH: 4 10 | DATALOADER: 11 | NUM_WORKERS: 2 12 | -------------------------------------------------------------------------------- /configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" 2 | 
MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 7 | TEST: ("coco_2017_val_100",) 8 | PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) 9 | SOLVER: 10 | BASE_LR: 0.005 11 | STEPS: (30,) 12 | MAX_ITER: 40 13 | IMS_PER_BATCH: 4 14 | DATALOADER: 15 | NUM_WORKERS: 2 16 | -------------------------------------------------------------------------------- /configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl" 4 | DATASETS: 5 | TEST: ("keypoints_coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | ROI_HEADS: 6 | NUM_CLASSES: 1 7 | DATASETS: 8 | TRAIN: ("keypoints_coco_2017_val_100",) 9 | TEST: ("keypoints_coco_2017_val_100",) 10 | SOLVER: 11 | BASE_LR: 0.005 12 | STEPS: (30,) 13 | MAX_ITER: 40 14 | IMS_PER_BATCH: 4 15 | DATALOADER: 16 | NUM_WORKERS: 2 17 | -------------------------------------------------------------------------------- /configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | BATCH_SIZE_PER_IMAGE: 256 9 | NUM_CLASSES: 1 10 | ROI_KEYPOINT_HEAD: 11 | POOLER_RESOLUTION: 14 12 | POOLER_SAMPLING_RATIO: 2 13 | NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False 14 | LOSS_WEIGHT: 4.0 15 | ROI_BOX_HEAD: 16 | SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss 17 | RPN: 18 | SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss 19 | DATASETS: 20 | TRAIN: ("keypoints_coco_2017_val",) 21 | TEST: ("keypoints_coco_2017_val",) 22 | INPUT: 23 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 24 | SOLVER: 25 | WARMUP_FACTOR: 0.33333333 26 | WARMUP_ITERS: 100 27 | STEPS: (5500, 5800) 28 | MAX_ITER: 6000 29 | TEST: 30 | EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], ["keypoints", "AP", 76.91, 1.0]] 31 | -------------------------------------------------------------------------------- /configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | KEYPOINT_ON: True 5 | RESNETS: 6 | DEPTH: 50 7 | ROI_HEADS: 8 | BATCH_SIZE_PER_IMAGE: 256 9 | NUM_CLASSES: 1 10 | ROI_KEYPOINT_HEAD: 11 | POOLER_RESOLUTION: 14 12 | POOLER_SAMPLING_RATIO: 2 13 | ROI_BOX_HEAD: 14 | SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss 15 | RPN: 16 | SMOOTH_L1_BETA: 0.2 # 
Keypoint AP degrades when using plain L1 loss 17 | DATASETS: 18 | TRAIN: ("keypoints_coco_2017_val",) 19 | TEST: ("keypoints_coco_2017_val",) 20 | INPUT: 21 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 22 | SOLVER: 23 | WARMUP_FACTOR: 0.33333333 24 | WARMUP_ITERS: 100 25 | STEPS: (5500, 5800) 26 | MAX_ITER: 6000 27 | TEST: 28 | EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]] 29 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.001 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | CLIP_GRADIENTS: 14 | ENABLED: True 15 | CLIP_TYPE: "value" 16 | CLIP_VALUE: 1.0 17 | DATALOADER: 18 | NUM_WORKERS: 2 19 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.001 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | DATALOADER: 14 | NUM_WORKERS: 2 15 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-C4.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_HEADS: 5 | BATCH_SIZE_PER_IMAGE: 256 6 | MASK_ON: True 7 | DATASETS: 8 | TRAIN: ("coco_2017_val",) 9 | TEST: ("coco_2017_val",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (600,) 12 | MAX_SIZE_TRAIN: 1000 13 | MIN_SIZE_TEST: 800 14 | MAX_SIZE_TEST: 1000 15 | SOLVER: 16 | IMS_PER_BATCH: 8 # base uses 16 17 | WARMUP_FACTOR: 0.33333 18 | WARMUP_ITERS: 100 19 | STEPS: (11000, 11600) 20 | MAX_ITER: 12000 21 | TEST: 22 | EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]] 23 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], 
["segm", "AP", 42.94, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]] 8 | AUG: 9 | ENABLED: True 10 | MIN_SIZES: (700, 800) # to save some time 11 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: True 5 | DATASETS: 6 | TRAIN: ("coco_2017_val_100",) 7 | TEST: ("coco_2017_val_100",) 8 | SOLVER: 9 | BASE_LR: 0.005 10 | STEPS: (30,) 11 | MAX_ITER: 40 12 | IMS_PER_BATCH: 4 13 | DATALOADER: 14 | NUM_WORKERS: 2 15 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "./mask_rcnn_R_50_FPN_training_acc_test.yaml" 2 | MODEL: 3 | ROI_BOX_HEAD: 4 | TRAIN_ON_PRED_BOXES: True 5 | TEST: 6 | EXPECTED_RESULTS: [["bbox", "AP", 42.6, 1.0], ["segm", "AP", 35.8, 0.8]] 7 | -------------------------------------------------------------------------------- /configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | ROI_HEADS: 5 | BATCH_SIZE_PER_IMAGE: 256 6 | MASK_ON: True 7 | DATASETS: 8 | TRAIN: ("coco_2017_val",) 9 | TEST: ("coco_2017_val",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (600,) 12 | MAX_SIZE_TRAIN: 1000 13 | MIN_SIZE_TEST: 800 14 | MAX_SIZE_TEST: 1000 15 | SOLVER: 16 | WARMUP_FACTOR: 0.3333333 17 | WARMUP_ITERS: 100 18 | STEPS: (5500, 5800) 19 | MAX_ITER: 6000 20 | TEST: 21 | EXPECTED_RESULTS: [["bbox", "AP", 42.5, 1.0], ["segm", "AP", 35.8, 0.8]] 22 | -------------------------------------------------------------------------------- /configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100_panoptic_separated",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: True 6 | RESNETS: 7 | DEPTH: 50 8 | SEM_SEG_HEAD: 9 | LOSS_WEIGHT: 0.5 10 | DATASETS: 11 | TRAIN: ("coco_2017_val_100_panoptic_separated",) 12 | TEST: ("coco_2017_val_100_panoptic_separated",) 13 | SOLVER: 14 | BASE_LR: 0.005 15 | STEPS: (30,) 16 | MAX_ITER: 40 17 | IMS_PER_BATCH: 4 18 | DATALOADER: 19 | NUM_WORKERS: 1 20 | -------------------------------------------------------------------------------- /configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "PanopticFPN" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | MASK_ON: True 6 | RESNETS: 7 | DEPTH: 50 8 | SEM_SEG_HEAD: 9 | LOSS_WEIGHT: 0.5 10 | DATASETS: 11 | TRAIN: ("coco_2017_val_panoptic_separated",) 12 | TEST: ("coco_2017_val_panoptic_separated",) 13 | SOLVER: 14 | BASE_LR: 0.01 15 | WARMUP_FACTOR: 0.001 16 | WARMUP_ITERS: 500 17 | STEPS: (5500,) 18 | MAX_ITER: 7000 19 | TEST: 20 | EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 39.0, 0.7], ["sem_seg", "mIoU", 64.73, 1.3], ["panoptic_seg", "PQ", 48.13, 0.8]] 21 | -------------------------------------------------------------------------------- /configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/190397829/model_final_5bd44e.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["bbox", "AP", 44.45, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | TEST: ("coco_2017_val_100",) 7 | SOLVER: 8 | BASE_LR: 0.005 9 | STEPS: (30,) 10 | MAX_ITER: 40 11 | IMS_PER_BATCH: 4 12 | DATALOADER: 13 | NUM_WORKERS: 2 14 | -------------------------------------------------------------------------------- /configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl" 4 | DATASETS: 5 | TEST: ("coco_2017_val_100",) 6 | TEST: 7 | EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]] 8 | -------------------------------------------------------------------------------- /configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" 2 | MODEL: 3 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | DATASETS: 5 | TRAIN: ("coco_2017_val_100",) 6 | TEST: ("coco_2017_val_100",) 7 | SOLVER: 8 | STEPS: (30,) 9 | MAX_ITER: 40 10 | BASE_LR: 0.005 11 | IMS_PER_BATCH: 4 12 | DATALOADER: 13 | NUM_WORKERS: 2 14 | -------------------------------------------------------------------------------- /configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml: 
-------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TEST: ("coco_2017_val_100_panoptic_stuffonly",) 9 | TEST: 10 | EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]] 11 | -------------------------------------------------------------------------------- /configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_val_100_panoptic_stuffonly",) 9 | TEST: ("coco_2017_val_100_panoptic_stuffonly",) 10 | INPUT: 11 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 12 | SOLVER: 13 | BASE_LR: 0.005 14 | STEPS: (30,) 15 | MAX_ITER: 40 16 | IMS_PER_BATCH: 4 17 | DATALOADER: 18 | NUM_WORKERS: 2 19 | -------------------------------------------------------------------------------- /configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-RCNN-FPN.yaml" 2 | MODEL: 3 | META_ARCHITECTURE: "SemanticSegmentor" 4 | WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 5 | RESNETS: 6 | DEPTH: 50 7 | DATASETS: 8 | TRAIN: ("coco_2017_val_panoptic_stuffonly",) 9 | TEST: ("coco_2017_val_panoptic_stuffonly",) 10 | SOLVER: 11 | BASE_LR: 0.01 12 | WARMUP_FACTOR: 0.001 13 | WARMUP_ITERS: 300 14 | STEPS: (5500,) 15 | MAX_ITER: 7000 16 | TEST: 17 | EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]] 18 | INPUT: 19 | # no scale augmentation 20 | MIN_SIZE_TRAIN: (800, ) 21 | -------------------------------------------------------------------------------- /conversion/convert_mix_ref.py: -------------------------------------------------------------------------------- 1 | """https://github.com/MasterBin-IIAU/UNINEXT""" 2 | import os 3 | import json 4 | 5 | if __name__ == "__main__": 6 | for dataset in ["refcoco-unc", "refcocog-umd", "refcocoplus-unc"]: 7 | json_path = "datasets/coco2014/annotations/%s/instances.json" % dataset 8 | os.system("python3 conversion/convert_ref2coco.py --src_json %s --des_json %s" %(json_path, json_path)) 9 | # merge train split 10 | merged_dir = "datasets/coco2014/annotations/refcoco-mixed" 11 | if not os.path.exists(merged_dir): 12 | os.makedirs(merged_dir) 13 | merged_json = "datasets/coco2014/annotations/refcoco-mixed/instances_train.json" 14 | inst_idx = 0 # index of the instance 15 | new_data = {"images": [], "annotations": [], "categories": [{"supercategory": "object","id": 1,"name": "object"}]} 16 | for dataset in ["refcoco-unc", "refcocog-umd", "refcocoplus-unc"]: 17 | json_path = "datasets/coco2014/annotations/%s/instances_train.json" % dataset # train split written in place by convert_ref2coco.py above 18 | data = json.load(open(json_path, 'r')) 19 | # for split in data.keys(): 20 | for (img, anno) in zip(data["images"], data["annotations"]): 21 | inst_idx += 1 22 | img["id"] = inst_idx 23 | anno["image_id"] = inst_idx 24 | anno["id"] = inst_idx 25 | new_data["images"].append(img) 26 | new_data["annotations"].append(anno) 27 | json.dump(new_data, open(merged_json, 'w')) # 126908 referred objects 
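28 | # Usage note: the merged instances_train.json can then be converted to the per-video 29 | # COCO format with conversion/convert_ref2cocovid.py (next file); its default 30 | # --src_json/--des_json already point at the file written above, e.g.: 31 | #   python3 conversion/convert_ref2cocovid.py --src_json datasets/coco2014/annotations/refcoco-mixed/instances_train.json --des_json datasets/coco2014/annotations/refcoco-mixed/instances_train_video.json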
-------------------------------------------------------------------------------- /conversion/convert_ref2cocovid.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | 4 | def parse_args(): 5 | parser = argparse.ArgumentParser("image to video converter") 6 | parser.add_argument("--src_json", default="datasets/coco2014/annotations/refcoco-mixed/instances_train.json", type=str, help="") 7 | parser.add_argument("--des_json", default="datasets/coco2014/annotations/refcoco-mixed/instances_train_video.json", type=str, help="") 8 | return parser.parse_args() 9 | 10 | if __name__ == "__main__": 11 | args = parse_args() 12 | src_dataset = json.load(open(args.src_json, 'r')) 13 | des_dataset = {'videos':[], 'categories':[], 'annotations':[]} 14 | des_dataset["categories"] = src_dataset["categories"] 15 | # videos 16 | for img_dict in src_dataset["images"]: 17 | vid_dict = {} 18 | vid_dict["length"] = 1 19 | vid_dict["file_names"] = [img_dict["file_name"]] 20 | vid_dict["width"], vid_dict["height"], vid_dict["id"] = img_dict["width"], img_dict["height"], img_dict["id"] 21 | vid_dict["expressions"] = img_dict["expressions"] 22 | des_dataset["videos"].append(vid_dict) 23 | # annotations 24 | for anno_dict in src_dataset["annotations"]: 25 | anno_dict_new = {} 26 | anno_dict_new["iscrowd"], anno_dict_new["category_id"], anno_dict_new["id"] = \ 27 | anno_dict["iscrowd"], anno_dict["category_id"], anno_dict["id"] 28 | anno_dict_new["video_id"] = anno_dict["image_id"] 29 | anno_dict_new["bboxes"] = [anno_dict["bbox"]] 30 | if "segmentation" in anno_dict: 31 | anno_dict_new["segmentations"] = [anno_dict["segmentation"]] 32 | anno_dict_new["areas"] = [anno_dict["area"]] 33 | des_dataset["annotations"].append(anno_dict_new) 34 | # save 35 | with open(args.des_json, "w") as f: 36 | json.dump(des_dataset, f) 37 | -------------------------------------------------------------------------------- /conversion/convert_vg2coco.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | 4 | def parse_args(): 5 | parser = argparse.ArgumentParser("vg to coco converter") 6 | parser.add_argument("--src_json", default="datasets/coco2014/annotations/pretraining-vg/instances.json", type=str, help="") 7 | parser.add_argument("--des_json", default="datasets/visual_genome/instances_vg.json", type=str, help="") 8 | return parser.parse_args() 9 | 10 | if __name__ == "__main__": 11 | args = parse_args() 12 | src_dataset = json.load(open(args.src_json, 'r'))["train"] 13 | src_vg = [] 14 | for anno_dict in src_dataset: 15 | if anno_dict["data_source"] == "vg": 16 | src_vg.append(anno_dict) 17 | 18 | des_dataset = {'images':[], 'categories':[{"supercategory": "object","id": 1,"name": "object"}], 'annotations':[]} 19 | 20 | # append images and annotations 21 | anno_id = 0 22 | for anno_dict in src_vg: 23 | anno_id += 1 24 | # images 25 | img_dict = {} 26 | img_dict["file_name"] = str(anno_dict["image_id"]) + ".jpg" 27 | img_dict["width"], img_dict["height"], img_dict["id"] = anno_dict["width"], anno_dict["height"], anno_id 28 | img_dict["expressions"] = anno_dict["expressions"] 29 | des_dataset["images"].append(img_dict) 30 | 31 | # annotations 32 | anno_dict_new = {} 33 | anno_dict_new["iscrowd"], anno_dict_new["category_id"], anno_dict_new["id"] = \ 34 | 0, 1, anno_id 35 | anno_dict_new["image_id"] = anno_id 36 | anno_dict_new["bbox"] = anno_dict["bbox"] # x1, y1, w, h 37 | anno_dict_new["area"] 
= anno_dict["bbox"][-2] * anno_dict["bbox"][-1] # w * h; singular "area" key, matching the COCO image format read by convert_ref2cocovid.py 38 | des_dataset["annotations"].append(anno_dict_new) 39 | # save 40 | with open(args.des_json, "w") as f: 41 | json.dump(des_dataset, f) 42 | -------------------------------------------------------------------------------- /conversion/convert_vg2cocovid.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | 4 | def parse_args(): 5 | parser = argparse.ArgumentParser("image to video converter") 6 | parser.add_argument("--src_json", default="datasets/coco2014/annotations/pretraining-vg/instances.json", type=str, help="") 7 | parser.add_argument("--des_json", default="datasets/visual_genome/instances_vg_video.json", type=str, help="") 8 | return parser.parse_args() 9 | 10 | if __name__ == "__main__": 11 | args = parse_args() 12 | src_dataset = json.load(open(args.src_json, 'r'))["train"] 13 | src_vg = [] 14 | for anno_dict in src_dataset: 15 | if anno_dict["data_source"] == "vg": 16 | src_vg.append(anno_dict) 17 | 18 | des_dataset = {'videos':[], 'categories':[{"supercategory": "object","id": 1,"name": "object"}], 'annotations':[]} 19 | 20 | # append videos and annotations 21 | anno_id = 0 22 | for anno_dict in src_vg: 23 | anno_id += 1 24 | # videos 25 | vid_dict = {} 26 | vid_dict["length"] = 1 27 | vid_dict["file_names"] = [str(anno_dict["image_id"]) + ".jpg"] 28 | vid_dict["width"], vid_dict["height"], vid_dict["id"] = anno_dict["width"], anno_dict["height"], anno_id 29 | vid_dict["expressions"] = anno_dict["expressions"] 30 | des_dataset["videos"].append(vid_dict) 31 | 32 | # annotations 33 | anno_dict_new = {} 34 | anno_dict_new["iscrowd"], anno_dict_new["category_id"], anno_dict_new["id"] = \ 35 | 0, 1, anno_id 36 | anno_dict_new["video_id"] = anno_id 37 | anno_dict_new["bboxes"] = [anno_dict["bbox"]] # x1, y1, w, h 38 | anno_dict_new["areas"] = [anno_dict["bbox"][-2] * anno_dict["bbox"][-1]] 39 | des_dataset["annotations"].append(anno_dict_new) 40 | # save 41 | with open(args.des_json, "w") as f: 42 | json.dump(des_dataset, f) 43 | -------------------------------------------------------------------------------- /conversion/models/convert_pth2pkl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | import pickle as pkl 5 | import sys 6 | 7 | import torch 8 | 9 | """ 10 | Usage: 11 | # download pretrained swin model: 12 | wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth 13 | # run the conversion 14 | python3 conversion/models/convert_pth2pkl.py swin_tiny_patch4_window7_224.pth swin_tiny_patch4_window7_224.pkl 15 | # Then, use swin_tiny_patch4_window7_224.pkl with the following changes in config: 16 | MODEL: 17 | WEIGHTS: "/path/to/swin_tiny_patch4_window7_224.pkl" 18 | INPUT: 19 | FORMAT: "RGB" 20 | """ 21 | 22 | if __name__ == "__main__": 23 | input = sys.argv[1] 24 | 25 | obj = torch.load(input, map_location="cpu")["model"] 26 | 27 | res = {"model": obj, "__author__": "third_party", "matching_heuristics": True} 28 | 29 | with open(sys.argv[2], "wb") as f: 30 | pkl.dump(res, f) 31 | -------------------------------------------------------------------------------- /datasets/prepare_ade20k_sem_seg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright (c) Facebook, Inc. 
-------------------------------------------------------------------------------- /datasets/prepare_ade20k_sem_seg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright (c) Facebook, Inc. and its affiliates. 4 | import numpy as np 5 | import os 6 | from pathlib import Path 7 | import tqdm 8 | from PIL import Image 9 | 10 | 11 | def convert(input, output): 12 | img = np.asarray(Image.open(input)) 13 | assert img.dtype == np.uint8 14 | img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1 15 | Image.fromarray(img).save(output) 16 | 17 | 18 | if __name__ == "__main__": 19 | dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016" 20 | for name in ["training", "validation"]: 21 | annotation_dir = dataset_dir / "annotations" / name 22 | output_dir = dataset_dir / "annotations_detectron2" / name 23 | output_dir.mkdir(parents=True, exist_ok=True) 24 | for file in tqdm.tqdm(list(annotation_dir.iterdir())): 25 | output_file = output_dir / file.name 26 | convert(file, output_file) 27 | -------------------------------------------------------------------------------- /datasets/prepare_for_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # Download the mini dataset (coco val2017_100, with only 100 images) 5 | # to be used in unittests & integration tests. 6 | 7 | cd "${0%/*}" 8 | 9 | BASE=https://dl.fbaipublicfiles.com/detectron2 10 | ROOT=${DETECTRON2_DATASETS:-./} 11 | ROOT=${ROOT/#\~/$HOME} # expand ~ to HOME 12 | mkdir -p $ROOT/coco/annotations 13 | 14 | for anno in instances_val2017_100 \ 15 | person_keypoints_val2017_100 ; do 16 | 17 | dest=$ROOT/coco/annotations/$anno.json 18 | [[ -s $dest ]] && { 19 | echo "$dest exists. Skipping ..." 20 | } || { 21 | wget $BASE/annotations/coco/$anno.json -O $dest 22 | } 23 | done 24 | 25 | dest=$ROOT/coco/val2017_100.tgz 26 | [[ -d $ROOT/coco/val2017 ]] && { 27 | echo "$ROOT/coco/val2017 exists. Skipping ..." 28 | } || { 29 | wget $BASE/annotations/coco/val2017_100.tgz -O $dest 30 | tar xzf $dest -C $ROOT/coco/ && rm -f $dest 31 | } 32 | -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Detectron2 Demo 3 | 4 | We provide a command line tool to run a simple demo of builtin configs. 5 | The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md). 6 | 7 | See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-) 8 | for a high-quality demo generated with this tool. 9 | -------------------------------------------------------------------------------- /detectron2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .utils.env import setup_environment 4 | 5 | setup_environment() 6 | 7 | 8 | # This line will be programmatically read/written by setup.py. 9 | # Leave it at the bottom of this file and don't touch it. 10 | __version__ = "0.6" 11 |
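The checkpoint package that follows is how weight files, including `.pkl` files produced by `conversion/models/convert_pth2pkl.py` above, are loaded at runtime. A minimal sketch (assuming `model` was built beforehand and `cfg.MODEL.WEIGHTS` points at a checkpoint):

```python
from detectron2.checkpoint import DetectionCheckpointer

# Load .pth or .pkl weights; converted third-party checkpoints are matched
# to model parameters via the "matching_heuristics" flag stored in the .pkl.
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
```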
-------------------------------------------------------------------------------- /detectron2/checkpoint/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # File: 4 | 5 | 6 | from . import catalog as _UNUSED # register the handler 7 | from .detection_checkpoint import DetectionCheckpointer 8 | from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer 9 | 10 | __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] 11 | -------------------------------------------------------------------------------- /detectron2/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .compat import downgrade_config, upgrade_config 3 | from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable 4 | from .instantiate import instantiate 5 | from .lazy import LazyCall, LazyConfig 6 | 7 | __all__ = [ 8 | "CfgNode", 9 | "get_cfg", 10 | "global_cfg", 11 | "set_global_cfg", 12 | "downgrade_config", 13 | "upgrade_config", 14 | "configurable", 15 | "instantiate", 16 | "LazyCall", 17 | "LazyConfig", 18 | ] 19 | 20 | 21 | from detectron2.utils.env import fixup_module_metadata 22 | 23 | fixup_module_metadata(__name__, globals(), __all__) 24 | del fixup_module_metadata 25 | -------------------------------------------------------------------------------- /detectron2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from . import transforms # isort:skip 3 | 4 | from .build import ( 5 | build_batch_data_loader, 6 | build_detection_test_loader, 7 | build_detection_train_loader, 8 | get_detection_dataset_dicts, 9 | load_proposals_into_dataset, 10 | print_instances_class_histogram, 11 | ) 12 | from .catalog import DatasetCatalog, MetadataCatalog, Metadata 13 | from .common import DatasetFromList, MapDataset, ToIterableDataset 14 | from .dataset_mapper import DatasetMapper 15 | 16 | # ensure the builtin datasets are registered 17 | from . import datasets, samplers # isort:skip 18 | 19 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 20 | -------------------------------------------------------------------------------- /detectron2/data/datasets/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Common Datasets 4 | 5 | The datasets implemented here do not need to load the data into the final format. 6 | Each should provide the minimal data structure needed to use the dataset, so it can be very efficient. 7 | 8 | For example, for an image dataset, just provide the file names and labels, but don't read the images. 9 | Let the downstream decide how to read. 10 | -------------------------------------------------------------------------------- /detectron2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json 3 | from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated 4 | from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta 5 | from .pascal_voc import load_voc_instances, register_pascal_voc 6 | from .
import builtin as _builtin # ensure the builtin datasets are registered 7 | 8 | 9 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 10 | -------------------------------------------------------------------------------- /detectron2/data/datasets/register_coco.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .coco import register_coco_instances # noqa 3 | from .coco_panoptic import register_coco_panoptic_separated # noqa 4 | -------------------------------------------------------------------------------- /detectron2/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .distributed_sampler import ( 3 | InferenceSampler, 4 | RandomSubsetTrainingSampler, 5 | RepeatFactorTrainingSampler, 6 | TrainingSampler, 7 | ) 8 | 9 | from .grouped_batch_sampler import GroupedBatchSampler 10 | 11 | __all__ = [ 12 | "GroupedBatchSampler", 13 | "TrainingSampler", 14 | "RandomSubsetTrainingSampler", 15 | "InferenceSampler", 16 | "RepeatFactorTrainingSampler", 17 | ] 18 | -------------------------------------------------------------------------------- /detectron2/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from fvcore.transforms.transform import Transform, TransformList # order them first 3 | from fvcore.transforms.transform import * 4 | from .transform import * 5 | from .augmentation import * 6 | from .augmentation_impl import * 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | 10 | 11 | from detectron2.utils.env import fixup_module_metadata 12 | 13 | fixup_module_metadata(__name__, globals(), __all__) 14 | del fixup_module_metadata 15 | -------------------------------------------------------------------------------- /detectron2/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .launch import * 4 | from .train_loop import * 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | 8 | 9 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 10 | # but still make them available here 11 | from .hooks import * 12 | from .defaults import * 13 | -------------------------------------------------------------------------------- /detectron2/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
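# A typical evaluation loop built from these exports (a sketch; the dataset
# name, model, and data loader are assumptions for illustration):
#   evaluator = COCOEvaluator("coco_2017_val", output_dir="./output")
#   results = inference_on_dataset(model, val_loader, evaluator)
#   print_csv_format(results)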
2 | from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator 3 | from .coco_evaluation import COCOEvaluator 4 | from .rotated_coco_evaluation import RotatedCOCOEvaluator 5 | from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset 6 | from .lvis_evaluation import LVISEvaluator 7 | from .panoptic_evaluation import COCOPanopticEvaluator 8 | from .pascal_voc_evaluation import PascalVOCDetectionEvaluator 9 | from .sem_seg_evaluation import SemSegEvaluator 10 | from .testing import print_csv_format, verify_results 11 | 12 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 13 | -------------------------------------------------------------------------------- /detectron2/export/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains code to prepare a detectron2 model for deployment. 3 | Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. 4 | 5 | Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. 6 | 7 | 8 | ### Acknowledgements 9 | 10 | Thanks to the Mobile Vision team at Facebook for developing the Caffe2 conversion tools. 11 | 12 | Thanks to the Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who 13 | helped export Detectron2 models to TorchScript. 14 | -------------------------------------------------------------------------------- /detectron2/export/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | try: 4 | from caffe2.proto import caffe2_pb2 as _tmp 5 | 6 | # caffe2 is optional 7 | except ImportError: 8 | pass 9 | else: 10 | from .api import * 11 | 12 | from .flatten import TracingAdapter 13 | from .torchscript import scripting_with_instances, dump_torchscript_IR 14 | 15 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 16 | -------------------------------------------------------------------------------- /detectron2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList 3 | from .deform_conv import DeformConv, ModulatedDeformConv 4 | from .mask_ops import paste_masks_in_image 5 | from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated 6 | from .roi_align import ROIAlign, roi_align 7 | from .roi_align_rotated import ROIAlignRotated, roi_align_rotated 8 | from .shape_spec import ShapeSpec 9 | from .wrappers import ( 10 | BatchNorm2d, 11 | Conv2d, 12 | ConvTranspose2d, 13 | cat, 14 | interpolate, 15 | Linear, 16 | nonzero_tuple, 17 | cross_entropy, 18 | shapes_to_tensor, 19 | ) 20 | from .blocks import CNNBlockBase, DepthwiseSeparableConv2d 21 | from .aspp import ASPP 22 | from .losses import ciou_loss, diou_loss 23 | 24 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 25 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | To add a new Op: 4 | 5 | 1. Create a new directory 6 | 2. Implement new ops there 7 | 3. Declare its Python interface in `vision.cpp` (see the sketch below).
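Once the extension is compiled, each declared op becomes reachable from Python through `torch.ops`; for example, the rotated-IoU op is what `detectron2/layers/rotated_boxes.py` calls. A minimal sketch (assuming detectron2 was built with its C++/CUDA extension):

```python
import torch
import detectron2.layers  # importing the layers package loads the compiled ops

boxes1 = torch.rand(4, 5)  # (x_center, y_center, width, height, angle)
boxes2 = torch.rand(6, 5)
iou = torch.ops.detectron2.box_iou_rotated(boxes1, boxes2)  # shape (4, 6)
```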
8 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | #pragma once 3 | #include <ATen/ATen.h> 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor box_iou_rotated_cpu( 8 | const at::Tensor& boxes1, 9 | const at::Tensor& boxes2); 10 | 11 | #if defined(WITH_CUDA) || defined(WITH_HIP) 12 | at::Tensor box_iou_rotated_cuda( 13 | const at::Tensor& boxes1, 14 | const at::Tensor& boxes2); 15 | #endif 16 | 17 | // Interface for Python 18 | // inline is needed to prevent multiple function definitions when this header is 19 | // included by different cpps 20 | inline at::Tensor box_iou_rotated( 21 | const at::Tensor& boxes1, 22 | const at::Tensor& boxes2) { 23 | assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); 24 | if (boxes1.device().is_cuda()) { 25 | #if defined(WITH_CUDA) || defined(WITH_HIP) 26 | return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); 27 | #else 28 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 29 | #endif 30 | } 31 | 32 | return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); 33 | } 34 | 35 | } // namespace detectron2 36 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | #include "box_iou_rotated.h" 3 | #include "box_iou_rotated_utils.h" 4 | 5 | namespace detectron2 { 6 | 7 | template <typename T> 8 | void box_iou_rotated_cpu_kernel( 9 | const at::Tensor& boxes1, 10 | const at::Tensor& boxes2, 11 | at::Tensor& ious) { 12 | auto num_boxes1 = boxes1.size(0); 13 | auto num_boxes2 = boxes2.size(0); 14 | 15 | for (int i = 0; i < num_boxes1; i++) { 16 | for (int j = 0; j < num_boxes2; j++) { 17 | ious[i * num_boxes2 + j] = single_box_iou_rotated<T>( 18 | boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>()); 19 | } 20 | } 21 | } 22 | 23 | at::Tensor box_iou_rotated_cpu( 24 | // input must be contiguous: 25 | const at::Tensor& boxes1, 26 | const at::Tensor& boxes2) { 27 | auto num_boxes1 = boxes1.size(0); 28 | auto num_boxes2 = boxes2.size(0); 29 | at::Tensor ious = 30 | at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); 31 | 32 | box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious); 33 | 34 | // reshape from 1d array to 2d array 35 | auto shape = std::vector<int64_t>{num_boxes1, num_boxes2}; 36 | return ious.reshape(shape); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/cuda_version.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | #include <cuda_runtime_api.h> 4 | 5 | namespace detectron2 { 6 | int get_cudart_version() { 7 | // Not a ROCM platform: Either HIP is not used, or 8 | // it is used, but platform is not ROCM (i.e. it is CUDA) 9 | #if !defined(__HIP_PLATFORM_HCC__) 10 | return CUDART_VERSION; 11 | #else 12 | int version = 0; 13 | 14 | #if HIP_VERSION_MAJOR != 0 15 | // Create a convention similar to that of CUDA, as assumed by other 16 | // parts of the code.
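// For example, HIP 3.5 maps to 3 * 100 + 5 = 305 under the convention below,
// loosely mirroring CUDART_VERSION (CUDA 10.1 reports 10010).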
17 | 18 | version = HIP_VERSION_MINOR; 19 | version += (HIP_VERSION_MAJOR * 100); 20 | #else 21 | hipRuntimeGetVersion(&version); 22 | #endif 23 | return version; 24 | #endif 25 | } 26 | } // namespace detectron2 27 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/nms_rotated/nms_rotated.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | #pragma once 3 | #include <ATen/ATen.h> 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor nms_rotated_cpu( 8 | const at::Tensor& dets, 9 | const at::Tensor& scores, 10 | const double iou_threshold); 11 | 12 | #if defined(WITH_CUDA) || defined(WITH_HIP) 13 | at::Tensor nms_rotated_cuda( 14 | const at::Tensor& dets, 15 | const at::Tensor& scores, 16 | const double iou_threshold); 17 | #endif 18 | 19 | // Interface for Python 20 | // inline is needed to prevent multiple function definitions when this header is 21 | // included by different cpps 22 | inline at::Tensor nms_rotated( 23 | const at::Tensor& dets, 24 | const at::Tensor& scores, 25 | const double iou_threshold) { 26 | assert(dets.device().is_cuda() == scores.device().is_cuda()); 27 | if (dets.device().is_cuda()) { 28 | #if defined(WITH_CUDA) || defined(WITH_HIP) 29 | return nms_rotated_cuda( 30 | dets.contiguous(), scores.contiguous(), iou_threshold); 31 | #else 32 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 33 | #endif 34 | } 35 | 36 | return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /detectron2/layers/rotated_boxes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from __future__ import absolute_import, division, print_function, unicode_literals 3 | import torch 4 | 5 | 6 | def pairwise_iou_rotated(boxes1, boxes2): 7 | """ 8 | Return intersection-over-union (Jaccard index) of boxes. 9 | 10 | Both sets of boxes are expected to be in 11 | (x_center, y_center, width, height, angle) format. 12 | 13 | Arguments: 14 | boxes1 (Tensor[N, 5]) 15 | boxes2 (Tensor[M, 5]) 16 | 17 | Returns: 18 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 19 | IoU values for every element in boxes1 and boxes2 20 | """ 21 | return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2) 22 | -------------------------------------------------------------------------------- /detectron2/layers/shape_spec.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | from collections import namedtuple 4 | 5 | 6 | class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): 7 | """ 8 | A simple structure that contains basic shape specification about a tensor. 9 | It is often used as the auxiliary inputs/outputs of models, 10 | to complement the lack of shape inference ability among pytorch modules.
11 | 12 | Attributes: 13 | channels: 14 | height: 15 | width: 16 | stride: 17 | """ 18 | 19 | def __new__(cls, channels=None, height=None, width=None, stride=None): 20 | return super().__new__(cls, channels, height, width, stride) 21 | -------------------------------------------------------------------------------- /detectron2/model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | """ 3 | Model Zoo API for Detectron2: a collection of functions to create common model architectures 4 | listed in `MODEL_ZOO.md `_, 5 | and optionally load their pre-trained weights. 6 | """ 7 | 8 | from .model_zoo import get, get_config_file, get_checkpoint_url, get_config 9 | 10 | __all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"] 11 | -------------------------------------------------------------------------------- /detectron2/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from detectron2.layers import ShapeSpec 3 | 4 | from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY 5 | from .backbone import ( 6 | BACKBONE_REGISTRY, 7 | FPN, 8 | Backbone, 9 | ResNet, 10 | ResNetBlockBase, 11 | build_backbone, 12 | build_resnet_backbone, 13 | make_stage, 14 | ) 15 | from .meta_arch import ( 16 | META_ARCH_REGISTRY, 17 | SEM_SEG_HEADS_REGISTRY, 18 | GeneralizedRCNN, 19 | PanopticFPN, 20 | ProposalNetwork, 21 | RetinaNet, 22 | SemanticSegmentor, 23 | build_model, 24 | build_sem_seg_head, 25 | FCOS, 26 | ) 27 | from .postprocessing import detector_postprocess 28 | from .proposal_generator import ( 29 | PROPOSAL_GENERATOR_REGISTRY, 30 | build_proposal_generator, 31 | RPN_HEAD_REGISTRY, 32 | build_rpn_head, 33 | ) 34 | from .roi_heads import ( 35 | ROI_BOX_HEAD_REGISTRY, 36 | ROI_HEADS_REGISTRY, 37 | ROI_KEYPOINT_HEAD_REGISTRY, 38 | ROI_MASK_HEAD_REGISTRY, 39 | ROIHeads, 40 | StandardROIHeads, 41 | BaseMaskRCNNHead, 42 | BaseKeypointRCNNHead, 43 | FastRCNNOutputLayers, 44 | build_box_head, 45 | build_keypoint_head, 46 | build_mask_head, 47 | build_roi_heads, 48 | ) 49 | from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA 50 | from .mmdet_wrapper import MMDetBackbone, MMDetDetector 51 | 52 | _EXCLUDE = {"ShapeSpec"} 53 | __all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] 54 | 55 | 56 | from detectron2.utils.env import fixup_module_metadata 57 | 58 | fixup_module_metadata(__name__, globals(), __all__) 59 | del fixup_module_metadata 60 | -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
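# Typical usage of this package (a sketch; `cfg` and `images` are assumptions):
#   backbone = build_backbone(cfg)       # class picked via cfg.MODEL.BACKBONE.NAME
#   features = backbone(images.tensor)   # dict mapping feature names, e.g. "res2", to tensors
#   shapes = backbone.output_shape()     # dict mapping feature names to ShapeSpec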
2 | from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip 3 | 4 | from .backbone import Backbone 5 | from .fpn import FPN 6 | from .regnet import RegNet 7 | from .resnet import ( 8 | BasicStem, 9 | ResNet, 10 | ResNetBlockBase, 11 | build_resnet_backbone, 12 | make_stage, 13 | BottleneckBlock, 14 | ) 15 | from .vit import ViT, SimpleFeaturePyramid, get_vit_lr_decay_rate 16 | 17 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 18 | # TODO can expose more resnet blocks after careful consideration 19 | -------------------------------------------------------------------------------- /detectron2/modeling/backbone/backbone.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from abc import ABCMeta, abstractmethod 3 | import torch.nn as nn 4 | 5 | from detectron2.layers import ShapeSpec 6 | 7 | __all__ = ["Backbone"] 8 | 9 | 10 | class Backbone(nn.Module, metaclass=ABCMeta): 11 | """ 12 | Abstract base class for network backbones. 13 | """ 14 | 15 | def __init__(self): 16 | """ 17 | The `__init__` method of any subclass can specify its own set of arguments. 18 | """ 19 | super().__init__() 20 | 21 | @abstractmethod 22 | def forward(self): 23 | """ 24 | Subclasses must override this method, but adhere to the same return type. 25 | 26 | Returns: 27 | dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor 28 | """ 29 | pass 30 | 31 | @property 32 | def size_divisibility(self) -> int: 33 | """ 34 | Some backbones require the input height and width to be divisible by a 35 | specific integer. This is typically true for encoder / decoder type networks 36 | with lateral connection (e.g., FPN) for which feature maps need to match 37 | dimension in the "bottom up" and "top down" paths. Set to 0 if no specific 38 | input size divisibility is required. 39 | """ 40 | return 0 41 | 42 | def output_shape(self): 43 | """ 44 | Returns: 45 | dict[str->ShapeSpec] 46 | """ 47 | # this is a backward-compatible default 48 | return { 49 | name: ShapeSpec( 50 | channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] 51 | ) 52 | for name in self._out_features 53 | } 54 | -------------------------------------------------------------------------------- /detectron2/modeling/backbone/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from detectron2.layers import ShapeSpec 3 | from detectron2.utils.registry import Registry 4 | 5 | from .backbone import Backbone 6 | 7 | BACKBONE_REGISTRY = Registry("BACKBONE") 8 | BACKBONE_REGISTRY.__doc__ = """ 9 | Registry for backbones, which extract feature maps from images 10 | 11 | The registered object must be a callable that accepts two arguments: 12 | 13 | 1. A :class:`detectron2.config.CfgNode` 14 | 2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. 15 | 16 | Registered object must return instance of :class:`Backbone`. 17 | """ 18 | 19 | 20 | def build_backbone(cfg, input_shape=None): 21 | """ 22 | Build a backbone from `cfg.MODEL.BACKBONE.NAME`. 
23 | 24 | Returns: 25 | an instance of :class:`Backbone` 26 | """ 27 | if input_shape is None: 28 | input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) 29 | 30 | backbone_name = cfg.MODEL.BACKBONE.NAME 31 | backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) 32 | assert isinstance(backbone, Backbone) 33 | return backbone 34 | -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | from .build import META_ARCH_REGISTRY, build_model # isort:skip 5 | 6 | from .panoptic_fpn import PanopticFPN 7 | 8 | # import all the meta_arch, so they will be registered 9 | from .rcnn import GeneralizedRCNN, ProposalNetwork 10 | from .dense_detector import DenseDetector 11 | from .retinanet import RetinaNet 12 | from .fcos import FCOS 13 | from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head 14 | 15 | 16 | __all__ = list(globals().keys()) 17 | -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import torch 3 | 4 | from detectron2.utils.logger import _log_api_usage 5 | from detectron2.utils.registry import Registry 6 | 7 | META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip 8 | META_ARCH_REGISTRY.__doc__ = """ 9 | Registry for meta-architectures, i.e. the whole model. 10 | 11 | The registered object will be called with `obj(cfg)` 12 | and expected to return a `nn.Module` object. 13 | """ 14 | 15 | 16 | def build_model(cfg): 17 | """ 18 | Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. 19 | Note that it does not load any weights from ``cfg``. 20 | """ 21 | meta_arch = cfg.MODEL.META_ARCHITECTURE 22 | model = META_ARCH_REGISTRY.get(meta_arch)(cfg) 23 | model.to(torch.device(cfg.MODEL.DEVICE)) 24 | _log_api_usage("modeling.meta_arch." + meta_arch) 25 | return model 26 | -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator 3 | from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead 4 | 5 | __all__ = list(globals().keys()) 6 | -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from detectron2.utils.registry import Registry 3 | 4 | PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") 5 | PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ 6 | Registry for proposal generator, which produces object proposals from feature maps. 7 | 8 | The registered object will be called with `obj(cfg, input_shape)`. 9 | The call should return a `nn.Module` object. 10 | """ 11 | 12 | from . 
import rpn, rrpn # noqa F401 isort:skip 13 | 14 | 15 | def build_proposal_generator(cfg, input_shape): 16 | """ 17 | Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. 18 | The name can be "PrecomputedProposals" to use no proposal generator. 19 | """ 20 | name = cfg.MODEL.PROPOSAL_GENERATOR.NAME 21 | if name == "PrecomputedProposals": 22 | return None 23 | 24 | return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) 25 | -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead 3 | from .keypoint_head import ( 4 | ROI_KEYPOINT_HEAD_REGISTRY, 5 | build_keypoint_head, 6 | BaseKeypointRCNNHead, 7 | KRCNNConvDeconvUpsampleHead, 8 | ) 9 | from .mask_head import ( 10 | ROI_MASK_HEAD_REGISTRY, 11 | build_mask_head, 12 | BaseMaskRCNNHead, 13 | MaskRCNNConvUpsampleHead, 14 | ) 15 | from .roi_heads import ( 16 | ROI_HEADS_REGISTRY, 17 | ROIHeads, 18 | Res5ROIHeads, 19 | StandardROIHeads, 20 | build_roi_heads, 21 | select_foreground_proposals, 22 | ) 23 | from .cascade_rcnn import CascadeROIHeads 24 | from .rotated_fast_rcnn import RROIHeads 25 | from .fast_rcnn import FastRCNNOutputLayers 26 | 27 | from . import cascade_rcnn # isort:skip 28 | 29 | __all__ = list(globals().keys()) 30 | -------------------------------------------------------------------------------- /detectron2/projects/README.md: -------------------------------------------------------------------------------- 1 | 2 | Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here. 3 | -------------------------------------------------------------------------------- /detectron2/projects/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import importlib.abc, importlib.util # a bare "import importlib" does not guarantee these submodules are loaded 3 | from pathlib import Path 4 | 5 | _PROJECTS = { 6 | "uniref": "UniRef" 7 | } 8 | _PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent / "projects" 9 | 10 | if _PROJECT_ROOT.is_dir(): 11 | # This is true only for in-place installation (pip install -e, setup.py develop), 12 | # where setup(package_dir=) does not work: https://github.com/pypa/setuptools/issues/230 13 | 14 | class _D2ProjectsFinder(importlib.abc.MetaPathFinder): 15 | def find_spec(self, name, path, target=None): 16 | if not name.startswith("detectron2.projects."): 17 | return 18 | project_name = name.split(".")[-1] 19 | project_dir = _PROJECTS.get(project_name) 20 | if not project_dir: 21 | return 22 | target_file = _PROJECT_ROOT / f"{project_dir}/{project_name}/__init__.py" 23 | if not target_file.is_file(): 24 | return 25 | return importlib.util.spec_from_file_location(name, target_file) 26 | 27 | import sys 28 | 29 | sys.meta_path.append(_D2ProjectsFinder()) 30 | -------------------------------------------------------------------------------- /detectron2/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
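# Typical usage of these helpers (a sketch; `cfg` and `model` are assumptions):
#   optimizer = build_optimizer(cfg, model)
#   scheduler = build_lr_scheduler(cfg, optimizer)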
2 | from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params 3 | from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, LRMultiplier, WarmupParamScheduler 4 | 5 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 6 | -------------------------------------------------------------------------------- /detectron2/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance 3 | from .image_list import ImageList 4 | 5 | from .instances import Instances 6 | from .keypoints import Keypoints, heatmaps_to_keypoints 7 | from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks 8 | from .rotated_boxes import RotatedBoxes 9 | from .rotated_boxes import pairwise_iou as pairwise_iou_rotated 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | 13 | 14 | from detectron2.utils.env import fixup_module_metadata 15 | 16 | fixup_module_metadata(__name__, globals(), __all__) 17 | del fixup_module_metadata 18 | -------------------------------------------------------------------------------- /detectron2/tracking/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .base_tracker import ( # noqa 3 | BaseTracker, 4 | build_tracker_head, 5 | TRACKER_HEADS_REGISTRY, 6 | ) 7 | from .bbox_iou_tracker import BBoxIOUTracker # noqa 8 | from .hungarian_tracker import BaseHungarianTracker # noqa 9 | from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa 10 | IOUWeightedHungarianBBoxIOUTracker, 11 | ) 12 | from .utils import create_prediction_pairs # noqa 13 | from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa 14 | 15 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 16 | -------------------------------------------------------------------------------- /detectron2/tracking/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from detectron2.structures import Instances 3 | import numpy as np 4 | from typing import List 5 | 6 | 7 | def create_prediction_pairs( 8 | instances: Instances, 9 | prev_instances: Instances, 10 | iou_all: np.ndarray, 11 | threshold: float = 0.5, 12 | ) -> List: 13 | """ 14 | Args: 15 | instances: predictions from current frame 16 | prev_instances: predictions from previous frame 17 | iou_all: 2D numpy array containing iou for each bbox pair 18 | threshold: pairs whose IoU is below this threshold are not considered valid 19 | Return: 20 | List of bbox pairs 21 | """ 22 | bbox_pairs = [] 23 | for i in range(len(instances)): 24 | for j in range(len(prev_instances)): 25 | if iou_all[i, j] < threshold: 26 | continue 27 | bbox_pairs.append( 28 | { 29 | "idx": i, 30 | "prev_idx": j, 31 | "prev_id": prev_instances.ID[j], 32 | "IoU": iou_all[i, j], 33 | "prev_period": prev_instances.ID_period[j], 34 | } 35 | ) 36 | return bbox_pairs 37 | 38 | 39 | LARGE_COST_VALUE = 100000 40 | -------------------------------------------------------------------------------- /detectron2/utils/README.md: -------------------------------------------------------------------------------- 1 | # Utility functions 2 | 3 | This folder contains utility functions that are not used in the 4 | core library, but are useful for building
models or training 5 | code using the config system. 6 | -------------------------------------------------------------------------------- /detectron2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | -------------------------------------------------------------------------------- /detectron2/utils/file_io.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler 3 | from iopath.common.file_io import PathManager as PathManagerBase 4 | 5 | __all__ = ["PathManager", "PathHandler"] 6 | 7 | 8 | PathManager = PathManagerBase() 9 | """ 10 | This is a detectron2 project-specific PathManager. 11 | We try to stay away from global PathManager in fvcore as it 12 | introduces potential conflicts among other libraries. 13 | """ 14 | 15 | 16 | class Detectron2Handler(PathHandler): 17 | """ 18 | Resolve anything that's hosted under detectron2's namespace. 19 | """ 20 | 21 | PREFIX = "detectron2://" 22 | S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" 23 | 24 | def _get_supported_prefixes(self): 25 | return [self.PREFIX] 26 | 27 | def _get_local_path(self, path, **kwargs): 28 | name = path[len(self.PREFIX) :] 29 | return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs) 30 | 31 | def _open(self, path, mode="r", **kwargs): 32 | return PathManager.open(self._get_local_path(path), mode, **kwargs) 33 | 34 | 35 | PathManager.register_handler(HTTPURLHandler()) 36 | PathManager.register_handler(OneDrivePathHandler()) 37 | PathManager.register_handler(Detectron2Handler()) 38 | -------------------------------------------------------------------------------- /detectron2/utils/serialize.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import cloudpickle 3 | 4 | 5 | class PicklableWrapper(object): 6 | """ 7 | Wrap an object to make it more picklable, note that it uses 8 | heavy weight serialization libraries that are slower than pickle. 9 | It's best to use it only on closures (which are usually not picklable). 10 | 11 | This is a simplified version of 12 | https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py 13 | """ 14 | 15 | def __init__(self, obj): 16 | while isinstance(obj, PicklableWrapper): 17 | # Wrapping an object twice is no-op 18 | obj = obj._obj 19 | self._obj = obj 20 | 21 | def __reduce__(self): 22 | s = cloudpickle.dumps(self._obj) 23 | return cloudpickle.loads, (s,) 24 | 25 | def __call__(self, *args, **kwargs): 26 | return self._obj(*args, **kwargs) 27 | 28 | def __getattr__(self, attr): 29 | # Ensure that the wrapped object can be used seamlessly as the previous object. 30 | if attr not in ["_obj"]: 31 | return getattr(self._obj, attr) 32 | return getattr(self, attr) 33 | -------------------------------------------------------------------------------- /dev/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Some scripts for developers to use, include: 3 | 4 | - `linter.sh`: lint the codebase before commit. 5 | - `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. 6 | Note that these tests require 2 GPUs. 7 | - `parse_results.sh`: parse results from a log file. 
8 | -------------------------------------------------------------------------------- /dev/linter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # cd to detectron2 project root 5 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 6 | 7 | { 8 | black --version | grep -E "21\." > /dev/null 9 | } || { 10 | echo "Linter requires 'black==21.*' !" 11 | exit 1 12 | } 13 | 14 | ISORT_VERSION=$(isort --version-number) 15 | if [[ "$ISORT_VERSION" != 4.3* ]]; then 16 | echo "Linter requires isort==4.3.21 !" 17 | exit 1 18 | fi 19 | 20 | set -v 21 | 22 | echo "Running isort ..." 23 | isort -y -sp . --atomic 24 | 25 | echo "Running black ..." 26 | black -l 100 . 27 | 28 | echo "Running flake8 ..." 29 | if [ -x "$(command -v flake8-3)" ]; then 30 | flake8-3 . 31 | else 32 | python3 -m flake8 . 33 | fi 34 | 35 | # echo "Running mypy ..." 36 | # Pytorch does not have enough type annotations 37 | # mypy detectron2/solver detectron2/structures detectron2/config 38 | 39 | echo "Running clang-format ..." 40 | find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i 41 | 42 | command -v arc > /dev/null && arc lint 43 | -------------------------------------------------------------------------------- /dev/packaging/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## To build a cu101 wheel for release: 3 | 4 | ``` 5 | $ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101 6 | # inside the container: 7 | # git clone https://github.com/facebookresearch/detectron2/ 8 | # cd detectron2 9 | # export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8 10 | # ./dev/packaging/build_wheel.sh 11 | ``` 12 | 13 | ## To build all wheels for combinations of CUDA and Python 14 | ``` 15 | ./dev/packaging/build_all_wheels.sh 16 | ./dev/packaging/gen_wheel_index.sh /path/to/wheels 17 | ``` 18 | -------------------------------------------------------------------------------- /dev/packaging/build_all_wheels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | [[ -d "dev/packaging" ]] || { 5 | echo "Please run this script at detectron2 root!" 6 | exit 1 7 | } 8 | 9 | build_one() { 10 | cu=$1 11 | pytorch_ver=$2 12 | 13 | case "$cu" in 14 | cu*) 15 | container_name=manylinux-cuda${cu/cu/} 16 | ;; 17 | cpu) 18 | container_name=manylinux-cuda101 19 | ;; 20 | *) 21 | echo "Unrecognized cu=$cu" 22 | exit 1 23 | ;; 24 | esac 25 | 26 | echo "Launching container $container_name ..." 27 | container_id="$container_name"_"$cu"_"$pytorch_ver" 28 | 29 | py_versions=(3.6 3.7 3.8 3.9) 30 | 31 | for py in "${py_versions[@]}"; do 32 | docker run -itd \ 33 | --name "$container_id" \ 34 | --mount type=bind,source="$(pwd)",target=/detectron2 \ 35 | pytorch/$container_name 36 | 37 | cat </dev/null 2>&1 && pwd )" 8 | . "$script_dir/pkg_helpers.bash" 9 | 10 | echo "Build Settings:" 11 | echo "CU_VERSION: $CU_VERSION" # e.g. cu101 12 | echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" 13 | echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6 14 | echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 
1.4 15 | 16 | setup_cuda 17 | setup_wheel_python 18 | 19 | yum install ninja-build -y 20 | ln -sv /usr/bin/ninja-build /usr/bin/ninja || true 21 | 22 | pip_install pip numpy -U 23 | pip_install "torch==$PYTORCH_VERSION" \ 24 | -f https://download.pytorch.org/whl/"$CU_VERSION"/torch_stable.html 25 | 26 | # use separate directories to allow parallel build 27 | BASE_BUILD_DIR=build/$CU_VERSION-py$PYTHON_VERSION-pt$PYTORCH_VERSION 28 | python setup.py \ 29 | build -b "$BASE_BUILD_DIR" \ 30 | bdist_wheel -b "$BASE_BUILD_DIR/build_dist" -d "wheels/$CU_VERSION/torch$PYTORCH_VERSION" 31 | rm -rf "$BASE_BUILD_DIR" 32 | -------------------------------------------------------------------------------- /dev/packaging/gen_wheel_index.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | 5 | root=$(readlink -f $1) 6 | if [[ -z "$root" ]]; then 7 | echo "Usage: ./gen_wheel_index.sh /absolute/path/to/wheels" 8 | exit 9 | fi 10 | 11 | export LC_ALL=C # reproducible sort 12 | # NOTE: all sort in this script might not work when xx.10 is released 13 | 14 | index=$root/index.html 15 | 16 | cd "$root" 17 | for cu in cpu cu92 cu100 cu101 cu102 cu110 cu111 cu113; do 18 | mkdir -p "$root/$cu" 19 | cd "$root/$cu" 20 | echo "Creating $PWD/index.html ..." 21 | # First sort by torch version, then stable sort by d2 version with unique. 22 | # As a result, the latest torch version for each d2 version is kept. 23 | for whl in $(find -type f -name '*.whl' -printf '%P\n' \ 24 | | sort -k 1 -r | sort -t '/' -k 2 --stable -r --unique); do 25 | echo "$whl
" 26 | done > index.html 27 | 28 | 29 | for torch in torch*; do 30 | cd "$root/$cu/$torch" 31 | 32 | # list all whl for each cuda,torch version 33 | echo "Creating $PWD/index.html ..." 34 | for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do 35 | echo "$whl
" 36 | done > index.html 37 | done 38 | done 39 | 40 | cd "$root" 41 | # Just list everything: 42 | echo "Creating $index ..." 43 | for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do 44 | echo "$whl
" 45 | done > "$index" 46 | 47 | -------------------------------------------------------------------------------- /dev/run_inference_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | BIN="python tools/train_net.py" 5 | OUTPUT="inference_test_output" 6 | NUM_GPUS=2 7 | 8 | CFG_LIST=( "${@:1}" ) 9 | 10 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 11 | CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) 12 | fi 13 | 14 | echo "========================================================================" 15 | echo "Configs to run:" 16 | echo "${CFG_LIST[@]}" 17 | echo "========================================================================" 18 | 19 | 20 | for cfg in "${CFG_LIST[@]}"; do 21 | echo "========================================================================" 22 | echo "Running $cfg ..." 23 | echo "========================================================================" 24 | $BIN \ 25 | --eval-only \ 26 | --num-gpus $NUM_GPUS \ 27 | --config-file "$cfg" \ 28 | OUTPUT_DIR $OUTPUT 29 | rm -rf $OUTPUT 30 | done 31 | 32 | 33 | echo "========================================================================" 34 | echo "Running demo.py ..." 35 | echo "========================================================================" 36 | DEMO_BIN="python demo/demo.py" 37 | COCO_DIR=datasets/coco/val2014 38 | mkdir -pv $OUTPUT 39 | 40 | set -v 41 | 42 | $DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ 43 | --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT 44 | rm -rf $OUTPUT 45 | -------------------------------------------------------------------------------- /dev/run_instant_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | BIN="python tools/train_net.py" 5 | OUTPUT="instant_test_output" 6 | NUM_GPUS=2 7 | 8 | CFG_LIST=( "${@:1}" ) 9 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 10 | CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) 11 | fi 12 | 13 | echo "========================================================================" 14 | echo "Configs to run:" 15 | echo "${CFG_LIST[@]}" 16 | echo "========================================================================" 17 | 18 | for cfg in "${CFG_LIST[@]}"; do 19 | echo "========================================================================" 20 | echo "Running $cfg ..." 21 | echo "========================================================================" 22 | $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ 23 | SOLVER.IMS_PER_BATCH $(($NUM_GPUS * 2)) \ 24 | OUTPUT_DIR "$OUTPUT" 25 | rm -rf "$OUTPUT" 26 | done 27 | 28 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Use the container (with docker ≥ 19.03) 3 | 4 | ``` 5 | cd docker/ 6 | # Build: 7 | docker build --build-arg USER_ID=$UID -t detectron2:v0 . 
8 | # Launch (require GPUs): 9 | docker run --gpus all -it \ 10 | --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \ 11 | --name=detectron2 detectron2:v0 12 | 13 | # Grant docker access to host X server to show images 14 | xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2` 15 | ``` 16 | 17 | ## Use the container (with docker-compose ≥ 1.28.0) 18 | 19 | Install docker-compose and nvidia-docker-toolkit, then run: 20 | ``` 21 | cd docker && USER_ID=$UID docker-compose run detectron2 22 | ``` 23 | 24 | ## Use the deployment container (to test C++ examples) 25 | After building the base detectron2 container as above, do: 26 | ``` 27 | # Build: 28 | docker build -t detectron2-deploy:v0 -f deploy.Dockerfile . 29 | # Launch: 30 | docker run --gpus all -it detectron2-deploy:v0 31 | ``` 32 | 33 | #### Using a persistent cache directory 34 | 35 | You can prevent models from being re-downloaded on every run, 36 | by storing them in a cache directory. 37 | 38 | To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command. 39 | 40 | ## Install new dependencies 41 | Add the following to `Dockerfile` to make persistent changes. 42 | ``` 43 | RUN sudo apt-get update && sudo apt-get install -y vim 44 | ``` 45 | Or run them in the container to make temporary changes. 46 | -------------------------------------------------------------------------------- /docker/deploy.Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # This file defines a container that compiles the C++ examples of detectron2. 3 | # See docker/README.md for usage. 4 | 5 | # Depends on the image produced by "./Dockerfile" 6 | FROM detectron2:v0 7 | 8 | USER appuser 9 | ENV HOME=/home/appuser 10 | WORKDIR $HOME 11 | 12 | # Let torchvision find libtorch 13 | ENV CMAKE_PREFIX_PATH=$HOME/.local/lib/python3.6/site-packages/torch/ 14 | 15 | RUN sudo apt-get update && sudo apt-get install libopencv-dev --yes 16 | 17 | # install libtorchvision 18 | RUN git clone --branch v0.11.1 https://github.com/pytorch/vision/ 19 | RUN mkdir vision/build && cd vision/build && \ 20 | cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/.local -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=on -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST && \ 21 | make -j && make install 22 | 23 | # make our installation take effect 24 | ENV CPATH=$HOME/.local/include \ 25 | LIBRARY_PATH=$HOME/.local/lib \ 26 | LD_LIBRARY_PATH=$HOME/.local/lib 27 | 28 | 29 | # build C++ examples of detectron2 30 | RUN cd detectron2_repo/tools/deploy && mkdir build && cd build && \ 31 | cmake -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST .. && make 32 | # binaries will be available under tools/deploy/build 33 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | detectron2: 4 | build: 5 | context: . 
6 | dockerfile: Dockerfile 7 | args: 8 | USER_ID: ${USER_ID:-1000} 9 | deploy: 10 | resources: 11 | reservations: 12 | devices: 13 | - capabilities: 14 | - gpu 15 | shm_size: "8gb" 16 | ulimits: 17 | memlock: -1 18 | stack: 67108864 19 | volumes: 20 | - /tmp/.X11-unix:/tmp/.X11-unix:ro 21 | environment: 22 | - DISPLAY=$DISPLAY 23 | - NVIDIA_VISIBLE_DEVICES=all 24 | # Uncomment with proper source to access webcam from docker 25 | # devices: 26 | # - /dev/video0:/dev/video0 27 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Read the docs: 2 | 3 | The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). 4 | Documents in this directory are not meant to be read on github. 5 | 6 | # Build the docs: 7 | 8 | 1. Install detectron2 according to [INSTALL.md](../INSTALL.md). 9 | 2. Install additional libraries required to build docs: 10 | - docutils==0.16 11 | - Sphinx==3.2.0 12 | - recommonmark==0.6.0 13 | - sphinx_rtd_theme 14 | 15 | 3. Run `make html` from this directory. 16 | -------------------------------------------------------------------------------- /docs/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * some extra css to make markdown look similar between github/sphinx 4 | */ 5 | 6 | /* 7 | * Below is for install.md: 8 | */ 9 | .rst-content code { 10 | white-space: pre; 11 | border: 0px; 12 | } 13 | 14 | .rst-content th { 15 | border: 1px solid #e1e4e5; 16 | } 17 | 18 | .rst-content th p { 19 | /* otherwise will be default 24px for regular paragraph */ 20 | margin-bottom: 0px; 21 | } 22 | 23 | .rst-content .line-block { 24 | /* otherwise will be 24px */ 25 | margin-bottom: 0px; 26 | } 27 | 28 | div.section > details { 29 | padding-bottom: 1em; 30 | } 31 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. detectron2 documentation master file, created by 2 | sphinx-quickstart on Sat Sep 21 13:46:45 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to detectron2's documentation! 
7 | ====================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | tutorials/index 13 | notes/index 14 | modules/index 15 | -------------------------------------------------------------------------------- /docs/modules/checkpoint.rst: -------------------------------------------------------------------------------- 1 | detectron2.checkpoint 2 | ============================= 3 | 4 | .. automodule:: detectron2.checkpoint 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/config.rst: -------------------------------------------------------------------------------- 1 | detectron2.config 2 | ========================= 3 | 4 | Related tutorials: :doc:`../tutorials/configs`, :doc:`../tutorials/extend`. 5 | 6 | .. automodule:: detectron2.config 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | 12 | Yaml Config References 13 | ----------------- 14 | 15 | .. literalinclude:: ../../detectron2/config/defaults.py 16 | :language: python 17 | :linenos: 18 | :lines: 7- 19 | -------------------------------------------------------------------------------- /docs/modules/data.rst: -------------------------------------------------------------------------------- 1 | detectron2.data 2 | ======================= 3 | 4 | .. autodata:: detectron2.data.DatasetCatalog(dict) 5 | :annotation: 6 | 7 | .. autodata:: detectron2.data.MetadataCatalog(dict) 8 | :annotation: 9 | 10 | .. automodule:: detectron2.data 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | detectron2.data.detection\_utils module 16 | --------------------------------------- 17 | 18 | .. automodule:: detectron2.data.detection_utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | detectron2.data.datasets module 24 | --------------------------------------- 25 | 26 | .. automodule:: detectron2.data.datasets 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | detectron2.data.samplers module 32 | --------------------------------------- 33 | 34 | .. automodule:: detectron2.data.samplers 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /docs/modules/data_transforms.rst: -------------------------------------------------------------------------------- 1 | detectron2.data.transforms 2 | ==================================== 3 | 4 | Related tutorial: :doc:`../tutorials/augmentation`. 5 | 6 | .. automodule:: detectron2.data.transforms 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :imported-members: 11 | -------------------------------------------------------------------------------- /docs/modules/engine.rst: -------------------------------------------------------------------------------- 1 | detectron2.engine 2 | ========================= 3 | 4 | Related tutorial: :doc:`../tutorials/training`. 5 | 6 | .. automodule:: detectron2.engine 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | 12 | detectron2.engine.defaults module 13 | --------------------------------- 14 | 15 | .. automodule:: detectron2.engine.defaults 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | 20 | detectron2.engine.hooks module 21 | --------------------------------- 22 | 23 | .. 
automodule:: detectron2.engine.hooks 24 | :members: 25 | :undoc-members: 26 | :show-inheritance: 27 | -------------------------------------------------------------------------------- /docs/modules/evaluation.rst: -------------------------------------------------------------------------------- 1 | detectron2.evaluation 2 | ============================= 3 | 4 | .. automodule:: detectron2.evaluation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/export.rst: -------------------------------------------------------------------------------- 1 | detectron2.export 2 | ========================= 3 | 4 | Related tutorial: :doc:`../tutorials/deployment`. 5 | 6 | .. automodule:: detectron2.export 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/modules/fvcore.rst: -------------------------------------------------------------------------------- 1 | fvcore documentation 2 | ==================== 3 | 4 | Detectron2 depends on utilities in 5 | `fvcore <https://github.com/facebookresearch/fvcore>`_. 6 | We include part of fvcore documentation here for easier reference. 7 | 8 | fvcore.nn 9 | ----------------- 10 | 11 | .. automodule:: fvcore.nn 12 | :members: 13 | :inherited-members: 14 | :undoc-members: 15 | :show-inheritance: 16 | 17 | fvcore.common 18 | --------------------- 19 | 20 | .. automodule:: fvcore.common.checkpoint 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | 25 | .. automodule:: fvcore.common.config 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | .. automodule:: fvcore.common.history_buffer 31 | :members: 32 | :undoc-members: 33 | :show-inheritance: 34 | 35 | .. automodule:: fvcore.common.param_scheduler 36 | :members: 37 | :inherited-members: 38 | :undoc-members: 39 | :show-inheritance: 40 | 41 | .. automodule:: fvcore.common.registry 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | .. automodule:: fvcore.common.timer 47 | :members: 48 | :undoc-members: 49 | :show-inheritance: 50 | -------------------------------------------------------------------------------- /docs/modules/index.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================== 3 | 4 | .. toctree:: 5 | 6 | checkpoint 7 | config 8 | data 9 | data_transforms 10 | engine 11 | evaluation 12 | layers 13 | model_zoo 14 | modeling 15 | solver 16 | structures 17 | utils 18 | export 19 | fvcore 20 | -------------------------------------------------------------------------------- /docs/modules/layers.rst: -------------------------------------------------------------------------------- 1 | detectron2.layers 2 | ========================= 3 | 4 | .. automodule:: detectron2.layers 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/model_zoo.rst: -------------------------------------------------------------------------------- 1 | detectron2.model_zoo 2 | ============================ 3 | 4 | .. automodule:: detectron2.model_zoo 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/modeling.rst: -------------------------------------------------------------------------------- 1 | detectron2.modeling 2 | =========================== 3 | 4 | ..
automodule:: detectron2.modeling 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | 10 | detectron2.modeling.poolers module 11 | --------------------------------------- 12 | 13 | .. automodule:: detectron2.modeling.poolers 14 | :members: 15 | :undoc-members: 16 | :show-inheritance: 17 | 18 | 19 | detectron2.modeling.sampling module 20 | ------------------------------------ 21 | 22 | .. automodule:: detectron2.modeling.sampling 23 | :members: 24 | :undoc-members: 25 | :show-inheritance: 26 | 27 | 28 | detectron2.modeling.box_regression module 29 | ------------------------------------------ 30 | 31 | .. automodule:: detectron2.modeling.box_regression 32 | :members: 33 | :undoc-members: 34 | :show-inheritance: 35 | 36 | 37 | Model Registries 38 | ----------------- 39 | 40 | These are different registries provided in modeling. 41 | Each registry provides the ability to replace a built-in component with your customized one, 42 | without having to modify detectron2's code. 43 | 44 | Note that registries cannot make every line of code customizable: 45 | even to change or add a single line somewhere, 46 | you'll likely need to find the smallest registry which contains that line, 47 | and register your component to that registry. 48 | 49 | 50 | .. autodata:: detectron2.modeling.META_ARCH_REGISTRY 51 | .. autodata:: detectron2.modeling.BACKBONE_REGISTRY 52 | .. autodata:: detectron2.modeling.PROPOSAL_GENERATOR_REGISTRY 53 | .. autodata:: detectron2.modeling.RPN_HEAD_REGISTRY 54 | .. autodata:: detectron2.modeling.ANCHOR_GENERATOR_REGISTRY 55 | .. autodata:: detectron2.modeling.ROI_HEADS_REGISTRY 56 | .. autodata:: detectron2.modeling.ROI_BOX_HEAD_REGISTRY 57 | .. autodata:: detectron2.modeling.ROI_MASK_HEAD_REGISTRY 58 | .. autodata:: detectron2.modeling.ROI_KEYPOINT_HEAD_REGISTRY 59 | -------------------------------------------------------------------------------- /docs/modules/solver.rst: -------------------------------------------------------------------------------- 1 | detectron2.solver 2 | ========================= 3 | 4 | .. automodule:: detectron2.solver 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/structures.rst: -------------------------------------------------------------------------------- 1 | detectron2.structures 2 | ============================= 3 | 4 | .. automodule:: detectron2.structures 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/utils.rst: -------------------------------------------------------------------------------- 1 | detectron2.utils 2 | ======================== 3 | 4 | detectron2.utils.colormap module 5 | -------------------------------- 6 | 7 | .. automodule:: detectron2.utils.colormap 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | 12 | detectron2.utils.comm module 13 | ---------------------------- 14 | 15 | .. automodule:: detectron2.utils.comm 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | 20 | 21 | detectron2.utils.events module 22 | ------------------------------ 23 | 24 | .. automodule:: detectron2.utils.events 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | 29 | 30 | detectron2.utils.logger module 31 | ------------------------------ 32 | 33 | ..
automodule:: detectron2.utils.logger 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | 39 | detectron2.utils.registry module 40 | -------------------------------- 41 | 42 | .. automodule:: detectron2.utils.registry 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | detectron2.utils.memory module 48 | ---------------------------------- 49 | 50 | .. automodule:: detectron2.utils.memory 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | 56 | detectron2.utils.analysis module 57 | ---------------------------------- 58 | 59 | .. automodule:: detectron2.utils.analysis 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | 65 | detectron2.utils.visualizer module 66 | ---------------------------------- 67 | 68 | .. automodule:: detectron2.utils.visualizer 69 | :members: 70 | :undoc-members: 71 | :show-inheritance: 72 | 73 | detectron2.utils.video\_visualizer module 74 | ----------------------------------------- 75 | 76 | .. automodule:: detectron2.utils.video_visualizer 77 | :members: 78 | :undoc-members: 79 | :show-inheritance: 80 | 81 | -------------------------------------------------------------------------------- /docs/notes/contributing.md: -------------------------------------------------------------------------------- 1 | ../../.github/CONTRIBUTING.md -------------------------------------------------------------------------------- /docs/notes/index.rst: -------------------------------------------------------------------------------- 1 | Notes 2 | ====================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | benchmarks 8 | compatibility 9 | contributing 10 | changelog 11 | -------------------------------------------------------------------------------- /docs/tutorials/README.md: -------------------------------------------------------------------------------- 1 | # Read the docs: 2 | 3 | The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). 4 | Documents in this directory are not meant to be read on GitHub. 5 | -------------------------------------------------------------------------------- /docs/tutorials/builtin_datasets.md: -------------------------------------------------------------------------------- 1 | ../../datasets/README.md -------------------------------------------------------------------------------- /docs/tutorials/getting_started.md: -------------------------------------------------------------------------------- 1 | ../../GETTING_STARTED.md -------------------------------------------------------------------------------- /docs/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ====================================== 3 | 4 | ..
toctree:: 5 | :maxdepth: 2 6 | 7 | install 8 | getting_started 9 | builtin_datasets 10 | extend 11 | datasets 12 | data_loading 13 | augmentation 14 | models 15 | write-models 16 | training 17 | evaluation 18 | configs 19 | lazyconfigs 20 | deployment 21 | -------------------------------------------------------------------------------- /docs/tutorials/install.md: -------------------------------------------------------------------------------- 1 | ../../INSTALL.md -------------------------------------------------------------------------------- /external/davis2017-evaluation/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | docs/site/ 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # pytest 105 | .pytest_cache 106 | 107 | # Pylint 108 | .pylintrc 109 | 110 | # PyCharm 111 | .idea/ 112 | .DS_Store 113 | 114 | # Generated C code 115 | _mask.c 116 | -------------------------------------------------------------------------------- /external/davis2017-evaluation/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, DAVIS: Densely Annotated VIdeo Segmentation 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. 
Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /external/davis2017-evaluation/davis2017/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | __version__ = '0.1.0' 4 | -------------------------------------------------------------------------------- /external/davis2017-evaluation/davis2017/results.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PIL import Image 4 | import sys 5 | 6 | 7 | class Results(object): 8 | def __init__(self, root_dir): 9 | self.root_dir = root_dir 10 | 11 | def _read_mask(self, sequence, frame_id): 12 | try: 13 | mask_path = os.path.join(self.root_dir, sequence, f'{frame_id}.png') 14 | return np.array(Image.open(mask_path)) 15 | except IOError as err: 16 | sys.stdout.write(sequence + " frame %s not found!\n" % frame_id) 17 | sys.stdout.write("The frames have to be indexed PNG files placed inside the corresponding sequence " 18 | "folder.\nThe indices have to match those of the initial frame.\n") 19 | sys.stderr.write("IOError: " + err.strerror + "\n") 20 | sys.exit() 21 | 22 | def read_masks(self, sequence, masks_id): 23 | mask_0 = self._read_mask(sequence, masks_id[0]) 24 | masks = np.zeros((len(masks_id), *mask_0.shape)) 25 | for ii, m in enumerate(masks_id): 26 | masks[ii, ...]
= self._read_mask(sequence, m) 27 | num_objects = int(np.max(masks)) 28 | tmp = np.ones((num_objects, *masks.shape)) 29 | tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None] 30 | masks = (tmp == masks[None, ...]) > 0 31 | return masks 32 | -------------------------------------------------------------------------------- /external/davis2017-evaluation/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = davis2017 3 | version = attr: davis2017.__version__ 4 | description = Evaluation Framework for DAVIS 2017 Semi-supervised and Unsupervised used in the DAVIS Challenges 5 | long_description = file: README.md 6 | long_description_content_type = text/markdown 7 | keywords = segmentation 8 | license = GPL v3 9 | author = Sergi Caelles 10 | author-email = scaelles@vision.ee.ethz.ch 11 | home-page = https://github.com/davisvideochallenge/davis2017-evaluation 12 | classifiers = 13 | Development Status :: 4 - Beta 14 | Intended Audience :: Developers 15 | Intended Audience :: Education 16 | Intended Audience :: Science/Research 17 | License :: OSI Approved :: GNU General Public License v3 (GPLv3) 18 | Programming Language :: Python :: 3.6 19 | Programming Language :: Python :: 3.7 20 | Topic :: Scientific/Engineering :: Human Machine Interfaces 21 | Topic :: Software Development :: Libraries 22 | Topic :: Software Development :: Libraries :: Python Modules 23 | -------------------------------------------------------------------------------- /external/davis2017-evaluation/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import sys 3 | 4 | if sys.version_info < (3, 6): 5 | sys.exit('Sorry, only Python >= 3.6 is supported') 6 | 7 | setup( 8 | python_requires='>=3.6, <4', 9 | install_requires=[ 10 | 'Pillow>=4.1.1', 11 | 'networkx>=2.0', 12 | 'numpy>=1.12.1', 13 | 'opencv-python>=4.0.0.21', 14 | 'pandas>=0.21.1', 15 | 'pathlib2;python_version<"3.5"', 16 | 'scikit-image>=0.13.1', 17 | 'scikit-learn>=0.18', 18 | 'scipy>=1.0.0', 19 | 'tqdm>=4.28.1' 20 | ]) 21 | -------------------------------------------------------------------------------- /external/lvos-evaluation/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/external/lvos-evaluation/.DS_Store -------------------------------------------------------------------------------- /external/lvos-evaluation/lvos/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | __version__ = '0.1.0' 4 | -------------------------------------------------------------------------------- /external/lvos-evaluation/lvos/utils.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import math 3 | import os 4 | import warnings 5 | 6 | import numpy as np 7 | from PIL import Image 8 | 9 | 10 | def db_statistics(per_frame_values): 11 | """ Compute mean, recall and decay from per-frame evaluation. 12 | Arguments: 13 | per_frame_values (ndarray): per-frame evaluation 14 | 15 | Returns: 16 | M, O, D (float, float, float): 17 | return evaluation statistics: mean, recall, decay.
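        Example (illustrative, approximate values): for
        per_frame_values = [0.9, 0.8, 0.6, 0.4], this returns M ≈ 0.675 (mean),
        O = 0.75 (three of the four frames exceed 0.5) and D ≈ 0.35 (mean over
        the first bin of frames minus mean over the last bin).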
18 | """ 19 | 20 | # strip off nan values 21 | with warnings.catch_warnings(): 22 | warnings.simplefilter("ignore", category=RuntimeWarning) 23 | M = np.nanmean(per_frame_values) 24 | O = np.nanmean(per_frame_values > 0.5) 25 | 26 | N_bins = 4 27 | ids = np.round(np.linspace(1, len(per_frame_values), N_bins + 1) + 1e-10) - 1 28 | ids = ids.astype(np.uint8) 29 | 30 | D_bins = [per_frame_values[ids[i]:ids[i + 1] + 1] for i in range(0, 4)] 31 | 32 | with warnings.catch_warnings(): 33 | warnings.simplefilter("ignore", category=RuntimeWarning) 34 | D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[3]) 35 | 36 | 37 | 38 | return M, O, D 39 | 40 | def db_statistics_var(per_frame_values_j,per_frame_values_f): 41 | JF=(per_frame_values_j+per_frame_values_f)/2 42 | JFM=np.nanmean(JF) 43 | value_len=JF.shape[1] 44 | var=(JFM-JF) 45 | V=(np.nansum((var**2))/value_len) 46 | V= round (V,4) 47 | V=math.sqrt(V) 48 | 49 | return V 50 | 51 | 52 | -------------------------------------------------------------------------------- /external/lvos-evaluation/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = lvos 3 | version = attr: lvos.__version__ 4 | description = Evaluation Framework for LVOS Semi-supervised Challenge 5 | long_description = file: README.md 6 | long_description_content_type = text/markdown 7 | keywords = segmentation 8 | license = GPL v3 9 | author = Sergi Caelles 10 | author-email = scaelles@vision.ee.ethz.ch 11 | home-page = https://github.com/LingyiHongfd/lvos-evaluation 12 | classifiers = 13 | Development Status :: 4 - Beta 14 | Intended Audience :: Developers 15 | Intended Audience :: Education 16 | Intended Audience :: Science/Research 17 | License :: OSI Approved :: GNU General Public License v3 (GPLv3) 18 | Programming Language :: Python :: 3.6 19 | Programming Language :: Python :: 3.7 20 | Topic :: Scientific/Engineering :: Human Machine Interfaces 21 | Topic :: Software Development :: Libraries 22 | Topic :: Software Development :: Libraries :: Python Modules 23 | -------------------------------------------------------------------------------- /external/lvos-evaluation/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import sys 3 | 4 | if sys.version_info < (3, 6): 5 | sys.exit('Sorry, only Python >= 3.6 is supported') 6 | 7 | setup( 8 | python_requires='>=3.6, <4', 9 | install_requires=[ 10 | 'Pillow>=4.1.1', 11 | 'networkx>=2.0', 12 | 'numpy>=1.12.1', 13 | 'opencv-python>=4.0.0.21', 14 | 'pandas>=0.21.1', 15 | 'pathlib2;python_version<"3.5"', 16 | 'scikit-image>=0.13.1', 17 | 'scikit-learn>=0.18', 18 | 'scipy>=1.0.0', 19 | 'tqdm>=4.28.1' 20 | ]) 21 | -------------------------------------------------------------------------------- /external/lvos-evaluation/unseen_videos.txt: -------------------------------------------------------------------------------- 1 | R3UJGYwy 2 | 058oeZ2p 3 | UtEnRpiP 4 | yfzVCnvU 5 | 6mNj04tC 6 | yWnanBID 7 | gw84JOqH 8 | 711gAS21 9 | FmzvB44A 10 | Oj1z5fO0 11 | tAyQIobn 12 | yxCvN6OJ 13 | LWVlKBlK 14 | K3OUeINk 15 | xpI7xRWN 16 | aFytsETk 17 | FiRTBMg2 18 | nfcT3owb 19 | v3uNUctx 20 | JGG6MrhF 21 | 49TNsJzk 22 | dtHbJvYy 23 | 8lxxCA5h 24 | N6CONZUW 25 | vJ8W2TO5 26 | gdqCcvs2 27 | pMntJwSQ 28 | -------------------------------------------------------------------------------- /projects/.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/projects/.DS_Store -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/r50/eval_fss_r50.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../image/joint_task_finetune_det_rec_fss_r50_16gpu.yaml" 2 | DATASETS: 3 | TEST: ("fss-1000-val",) -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/r50/eval_rec_r50.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../image/joint_task_finetune_det_rec_fss_r50_16gpu.yaml" 2 | DATASETS: 3 | TEST: ("refcoco-unc-val", "refcoco-unc-testA", "refcoco-unc-testB", "refcocoplus-unc-val", "refcocoplus-unc-testA", "refcocoplus-unc-testB", "refcocog-umd-val", "refcocog-umd-test") -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/r50/eval_rvos_r50.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../video/joint_task_vos_rvos_r50_16gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "refytvos-val" 5 | - "refdavis-val-0" 6 | - "refdavis-val-1" 7 | - "refdavis-val-2" 8 | - "refdavis-val-3" -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/r50/eval_vos_r50.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../video/joint_task_vos_rvos_r50_16gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "ytbvos18-val" 5 | - "ytbvos19-val" 6 | - "davis17-val" 7 | - "vos-lvos-val" 8 | - "mose-val" -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/swin-l/eval_fss_swin-l.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../image/joint_task_finetune_det_rec_fss_swin-l_16gpu.yaml" 2 | DATASETS: 3 | TEST: ("fss-1000-val",) -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/swin-l/eval_rec_swin-l.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../image/joint_task_finetune_det_rec_fss_swin-l_16gpu.yaml" 2 | DATASETS: 3 | TEST: ("refcoco-unc-val", "refcoco-unc-testA", "refcoco-unc-testB", "refcocoplus-unc-val", "refcocoplus-unc-testA", "refcocoplus-unc-testB", "refcocog-umd-val", "refcocog-umd-test") -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/swin-l/eval_rvos_swin-l.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../video/joint_task_vos_rvos_swin-l_16gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "refytvos-val" 5 | - "refdavis-val-0" 6 | - "refdavis-val-1" 7 | - "refdavis-val-2" 8 | - "refdavis-val-3" -------------------------------------------------------------------------------- /projects/UniRef/configs/eval/swin-l/eval_vos_swin-l.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../../video/joint_task_vos_rvos_swin-l_16gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "ytbvos18-val" 5 | - "ytbvos19-val" 6 | - "davis17-val" 7 | - "vos-lvos-val" 8 | - "mose-val" -------------------------------------------------------------------------------- 
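These eval configs override only DATASETS.TEST on top of the training configs they pull in through _BASE_; the model, solver, and dataloader settings all come from the base YAML. A minimal sketch of how one of these files might be loaded programmatically (illustrative, not part of the repository: it assumes the project root is on PYTHONPATH so that the `uniref` package below is importable, and the config path is given relative to the repository root):

    from detectron2.config import get_cfg
    from uniref import add_uniref_config  # exported by projects/UniRef/uniref/__init__.py

    cfg = get_cfg()         # detectron2's default config
    add_uniref_config(cfg)  # add the UniRef-specific keys (defined in uniref/config.py)
    cfg.merge_from_file("projects/UniRef/configs/eval/r50/eval_rec_r50.yaml")  # _BASE_ is resolved recursively
    print(cfg.DATASETS.TEST)  # ("refcoco-unc-val", "refcoco-unc-testA", ...)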
/projects/UniRef/configs/image/joint_task_det_rec_r50_16gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef" 3 | WEIGHTS: "pretrained_models/R-50.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | OTA: True 8 | WITH_LANG_REF: True 9 | LANG_CONFIG: 10 | FREEZE_TEXT_ENCODER: False 11 | MODEL_TYPE: "bert-base" 12 | RESNETS: 13 | DEPTH: 50 14 | STRIDE_IN_1X1: False 15 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 16 | DDETRS: 17 | IN_FEATURES: ["res3", "res4", "res5"] 18 | TO_AGNOSTIC: True 19 | NUM_CLASSES: 1 20 | NUM_OBJECT_QUERIES: 300 21 | TWO_STAGE_NUM_PROPOSALS: 300 22 | DROPOUT: 0.0 23 | TWO_STAGE: True 24 | MIXED_SELECTION: True 25 | LOOK_FORWARD_TWICE: True 26 | USE_BACKBONE_FEATURE: True 27 | DATASETS: 28 | TRAIN: ("coco_2017_train", "refcoco-mixed") 29 | TEST: ("refcoco-unc-val",) 30 | SOLVER: 31 | IMS_PER_BATCH: 16 # batch-per-gpu = 2 32 | BASE_LR: 0.0002 33 | STEPS: (75000,) # (76658,) 34 | MAX_ITER: 90000 # 91990 35 | WARMUP_FACTOR: 1.0 36 | WARMUP_ITERS: 200 37 | WEIGHT_DECAY: 0.05 38 | OPTIMIZER: "ADAMW" 39 | BACKBONE_MULTIPLIER: 0.1 40 | CLIP_GRADIENTS: 41 | ENABLED: True 42 | CLIP_TYPE: "full_model" 43 | CLIP_VALUE: 0.1 44 | NORM_TYPE: 2.0 45 | CHECKPOINT_PERIOD: 5000 46 | INPUT: 47 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 48 | CROP: 49 | ENABLED: True 50 | TYPE: "absolute_range" 51 | SIZE: (384, 600) 52 | FORMAT: "RGB" 53 | TEST: 54 | EVAL_PERIOD: 5000 55 | DATALOADER: 56 | SAMPLER_TRAIN: "MultiDatasetSampler" 57 | DATASET_RATIO: [1, 1] 58 | USE_DIFF_BS_SIZE: True 59 | DATASET_BS: [2, 2] 60 | USE_RFS: [False, False] 61 | FILTER_EMPTY_ANNOTATIONS: True 62 | NUM_WORKERS: 8 63 | OUTPUT_DIR: outputs/joint_task_det_rec_r50_16gpu 64 | VERSION: 2 65 | -------------------------------------------------------------------------------- /projects/UniRef/configs/image/joint_task_det_rec_swin-l_16gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef" 3 | WEIGHTS: "pretrained_models/swin_large_patch4_window12_384_22k.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | OTA: True 8 | WITH_LANG_REF: True 9 | LANG_CONFIG: 10 | FREEZE_TEXT_ENCODER: False 11 | MODEL_TYPE: "bert-base" 12 | BACKBONE: 13 | NAME: "D2SwinTransformer" 14 | SWIN: 15 | NAME: "large" 16 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 17 | DDETRS: 18 | IN_FEATURES: ["res3", "res4", "res5"] 19 | TO_AGNOSTIC: True 20 | NUM_CLASSES: 1 21 | NUM_OBJECT_QUERIES: 300 22 | TWO_STAGE_NUM_PROPOSALS: 300 23 | DROPOUT: 0.0 24 | TWO_STAGE: True 25 | MIXED_SELECTION: True 26 | LOOK_FORWARD_TWICE: True 27 | USE_BACKBONE_FEATURE: True 28 | DATASETS: 29 | TRAIN: ("coco_2017_train", "refcoco-mixed") 30 | TEST: ("refcoco-unc-val",) 31 | SOLVER: 32 | IMS_PER_BATCH: 16 # batch-per-gpu = 2 33 | BASE_LR: 0.0002 34 | STEPS: (75000,) # (76658,) 35 | MAX_ITER: 90000 # 91990 36 | WARMUP_FACTOR: 1.0 37 | WARMUP_ITERS: 200 38 | WEIGHT_DECAY: 0.05 39 | OPTIMIZER: "ADAMW" 40 | BACKBONE_MULTIPLIER: 0.1 41 | CLIP_GRADIENTS: 42 | ENABLED: True 43 | CLIP_TYPE: "full_model" 44 | CLIP_VALUE: 0.1 45 | NORM_TYPE: 2.0 46 | CHECKPOINT_PERIOD: 5000 47 | INPUT: 48 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 49 | CROP: 50 | ENABLED: True 51 | TYPE: "absolute_range" 52 | SIZE: (384, 600) 53 | FORMAT: "RGB" 54 | TEST: 55 | 
EVAL_PERIOD: 5000 56 | DATALOADER: 57 | SAMPLER_TRAIN: "MultiDatasetSampler" 58 | DATASET_RATIO: [1, 1] 59 | USE_DIFF_BS_SIZE: True 60 | DATASET_BS: [2, 2] 61 | USE_RFS: [False, False] 62 | FILTER_EMPTY_ANNOTATIONS: True 63 | NUM_WORKERS: 8 64 | OUTPUT_DIR: outputs/joint_task_det_rec_swin-l_16gpu 65 | VERSION: 2 66 | -------------------------------------------------------------------------------- /projects/UniRef/configs/image/joint_task_finetune_det_rec_fss_r50_16gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef" 3 | WEIGHTS: "pretrained_models/R-50.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | OTA: True 8 | WITH_MASK_REF: True 9 | WITH_LANG_REF: True 10 | LANG_CONFIG: 11 | FREEZE_TEXT_ENCODER: False 12 | MODEL_TYPE: "bert-base" 13 | RESNETS: 14 | DEPTH: 50 15 | STRIDE_IN_1X1: False 16 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 17 | DDETRS: 18 | IN_FEATURES: ["res3", "res4", "res5"] 19 | TO_AGNOSTIC: True 20 | NUM_CLASSES: 1 21 | NUM_OBJECT_QUERIES: 300 22 | TWO_STAGE_NUM_PROPOSALS: 300 23 | DROPOUT: 0.0 24 | TWO_STAGE: True 25 | MIXED_SELECTION: True 26 | LOOK_FORWARD_TWICE: True 27 | USE_BACKBONE_FEATURE: True 28 | DATASETS: 29 | TRAIN: ("coco_2017_train", "refcoco-mixed", "fss-1000-train") 30 | TEST: ("refcoco-unc-val", "fss-1000-val") 31 | SOLVER: 32 | IMS_PER_BATCH: 16 # batch-per-gpu = 2 33 | BASE_LR: 0.00002 34 | STEPS: () 35 | MAX_ITER: 30000 36 | WARMUP_FACTOR: 1.0 37 | WARMUP_ITERS: 200 38 | WEIGHT_DECAY: 0.05 39 | OPTIMIZER: "ADAMW" 40 | BACKBONE_MULTIPLIER: 0.1 41 | CLIP_GRADIENTS: 42 | ENABLED: True 43 | CLIP_TYPE: "full_model" 44 | CLIP_VALUE: 0.1 45 | NORM_TYPE: 2.0 46 | CHECKPOINT_PERIOD: 5000 47 | INPUT: 48 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 49 | CROP: 50 | ENABLED: True 51 | TYPE: "absolute_range" 52 | SIZE: (384, 600) 53 | FORMAT: "RGB" 54 | TEST: 55 | EVAL_PERIOD: 5000 56 | DATALOADER: 57 | SAMPLER_TRAIN: "MultiDatasetSampler" 58 | DATASET_RATIO: [1, 1, 0.05] 59 | USE_DIFF_BS_SIZE: True 60 | DATASET_BS: [2, 2, 2] 61 | USE_RFS: [False, False, False] 62 | FILTER_EMPTY_ANNOTATIONS: True 63 | NUM_WORKERS: 8 64 | OUTPUT_DIR: outputs/joint_task_det_rec_fss_r50_16gpu 65 | VERSION: 2 66 | -------------------------------------------------------------------------------- /projects/UniRef/configs/pretrain/obj365v2_r50_32gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef" 3 | WEIGHTS: "pretrained_models/R-50.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | OTA: True 8 | USE_EARLY_FUSION: False 9 | BOXINST: 10 | ENABLED: True 11 | RESNETS: 12 | DEPTH: 50 13 | STRIDE_IN_1X1: False 14 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 15 | DDETRS: 16 | IN_FEATURES: ["res3", "res4", "res5"] 17 | NUM_CLASSES: 365 18 | NUM_OBJECT_QUERIES: 300 19 | TWO_STAGE_NUM_PROPOSALS: 300 20 | DROPOUT: 0.0 21 | TWO_STAGE: True 22 | MIXED_SELECTION: True 23 | LOOK_FORWARD_TWICE: True 24 | USE_BACKBONE_FEATURE: True 25 | DATASETS: 26 | TRAIN: ("objects365_v2_train",) 27 | TEST: ("objects365_v2_val",) 28 | SOLVER: 29 | IMS_PER_BATCH: 64 # batch-per-gpu = 2, 32gpu 30 | BASE_LR: 0.0002 31 | STEPS: (312346,) 32 | MAX_ITER: 340741 # 1817287 images per epoch. 
1817287 x 12 epoch / (2 x 32GPU) 33 | WARMUP_FACTOR: 1.0 34 | WARMUP_ITERS: 200 35 | WEIGHT_DECAY: 0.05 36 | OPTIMIZER: "ADAMW" 37 | BACKBONE_MULTIPLIER: 0.1 38 | CLIP_GRADIENTS: 39 | ENABLED: True 40 | CLIP_TYPE: "full_model" 41 | CLIP_VALUE: 0.1 42 | NORM_TYPE: 2.0 43 | CHECKPOINT_PERIOD: 2500 44 | INPUT: 45 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 46 | CROP: 47 | ENABLED: True 48 | TYPE: "absolute_range" 49 | SIZE: (384, 600) 50 | FORMAT: "RGB" 51 | TEST: 52 | EVAL_PERIOD: 0 53 | DATALOADER: 54 | SAMPLER_TRAIN: "ClassAwareSamplerr" 55 | FILTER_EMPTY_ANNOTATIONS: True 56 | NUM_WORKERS: 8 57 | OUTPUT_DIR: outputs/obj365v2_r50_32gpu 58 | FIND_UNUSED_PARAMETERS: False 59 | VERSION: 2 60 | -------------------------------------------------------------------------------- /projects/UniRef/configs/pretrain/obj365v2_swin-l_32gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef" 3 | WEIGHTS: "pretrained_models/swin_large_patch4_window12_384_22k.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | OTA: True 8 | USE_EARLY_FUSION: False 9 | BOXINST: 10 | ENABLED: True 11 | BACKBONE: 12 | NAME: "D2SwinTransformer" 13 | SWIN: 14 | NAME: "large" 15 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 16 | DDETRS: 17 | IN_FEATURES: ["res3", "res4", "res5"] 18 | NUM_CLASSES: 365 19 | NUM_OBJECT_QUERIES: 300 20 | TWO_STAGE_NUM_PROPOSALS: 300 21 | DROPOUT: 0.0 22 | TWO_STAGE: True 23 | MIXED_SELECTION: True 24 | LOOK_FORWARD_TWICE: True 25 | USE_BACKBONE_FEATURE: True 26 | DATASETS: 27 | TRAIN: ("objects365_v2_train",) 28 | TEST: ("objects365_v2_val",) 29 | SOLVER: 30 | IMS_PER_BATCH: 64 # batch-per-gpu = 2, 32gpu 31 | BASE_LR: 0.0002 32 | STEPS: (312346,) 33 | MAX_ITER: 340741 # 1817287 images per epoch. 
1817287 x 12 epoch / (2 x 32GPU) 34 | WARMUP_FACTOR: 1.0 35 | WARMUP_ITERS: 200 36 | WEIGHT_DECAY: 0.05 37 | OPTIMIZER: "ADAMW" 38 | BACKBONE_MULTIPLIER: 0.1 39 | CLIP_GRADIENTS: 40 | ENABLED: True 41 | CLIP_TYPE: "full_model" 42 | CLIP_VALUE: 0.1 43 | NORM_TYPE: 2.0 44 | CHECKPOINT_PERIOD: 2500 45 | INPUT: 46 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 47 | CROP: 48 | ENABLED: True 49 | TYPE: "absolute_range" 50 | SIZE: (384, 600) 51 | FORMAT: "RGB" 52 | TEST: 53 | EVAL_PERIOD: 0 54 | DATALOADER: 55 | SAMPLER_TRAIN: "ClassAwareSamplerr" 56 | FILTER_EMPTY_ANNOTATIONS: True 57 | NUM_WORKERS: 8 58 | OUTPUT_DIR: outputs/obj365v2_swin-l_32gpu 59 | FIND_UNUSED_PARAMETERS: False 60 | VERSION: 2 61 | -------------------------------------------------------------------------------- /projects/UniRef/configs/sam/eval/eval_sam_fss.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../sam_image_joint_rec_fss_8gpu.yaml" 2 | DATASETS: 3 | TEST: ("fss-1000-val",) -------------------------------------------------------------------------------- /projects/UniRef/configs/sam/eval/eval_sam_rec.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../sam_image_joint_rec_fss_8gpu.yaml" 2 | DATASETS: 3 | TEST: ("refcoco-unc-val",) -------------------------------------------------------------------------------- /projects/UniRef/configs/sam/eval/eval_sam_rvos.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../sam_video_joint_vos_rvos_8gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "refytvos-val" 5 | - "refdavis-val-0" 6 | - "refdavis-val-1" 7 | - "refdavis-val-2" 8 | - "refdavis-val-3" -------------------------------------------------------------------------------- /projects/UniRef/configs/sam/eval/eval_sam_vos.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: "../sam_video_joint_vos_rvos_8gpu.yaml" 2 | DATASETS: 3 | TEST: 4 | - "ytbvos18-val" 5 | - "ytbvos19-val" 6 | - "davis17-val" 7 | - "vos-lvos-val" 8 | - "mose-val" -------------------------------------------------------------------------------- /projects/UniRef/configs/sam/sam_image_joint_rec_fss_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "UniRef_SAM" 3 | WEIGHTS: "" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | WITH_MASK_REF: True 8 | WITH_LANG_REF: True 9 | LANG_CONFIG: 10 | FREEZE_TEXT_ENCODER: False 11 | MODEL_TYPE: "bert-base" 12 | DATASETS: 13 | TRAIN: ("refcoco-mixed", "fss-1000-train") 14 | TEST: ("refcoco-unc-val", "fss-1000-val") 15 | SOLVER: 16 | IMS_PER_BATCH: 16 # batch-per-gpu = 2 17 | BASE_LR: 0.0002 18 | STEPS: (75000,) 19 | MAX_ITER: 90000 20 | WARMUP_FACTOR: 1.0 21 | WARMUP_ITERS: 200 22 | WEIGHT_DECAY: 0.05 23 | OPTIMIZER: "ADAMW" 24 | BACKBONE_MULTIPLIER: 0.1 25 | CLIP_GRADIENTS: 26 | ENABLED: True 27 | CLIP_TYPE: "full_model" 28 | CLIP_VALUE: 0.1 29 | NORM_TYPE: 2.0 30 | CHECKPOINT_PERIOD: 5000 31 | INPUT: 32 | DATASET_MAPPER_NAME: "sam" 33 | CROP: 34 | ENABLED: True 35 | TYPE: "absolute_range" 36 | SIZE: (384, 600) 37 | FORMAT: "RGB" 38 | TEST: 39 | EVAL_PERIOD: 5000 40 | DATALOADER: 41 | SAMPLER_TRAIN: "MultiDatasetSampler" 42 | DATASET_RATIO: [1, 0.05] 43 | USE_DIFF_BS_SIZE: True 44 | DATASET_BS: [2, 2] 45 | USE_RFS: [False, False] 46 | FILTER_EMPTY_ANNOTATIONS: True 47 | 
NUM_WORKERS: 8 48 | OUTPUT_DIR: outputs/sam_image_joint_rec_fss_8gpu 49 | VERSION: 2 50 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import add_uniref_config 2 | from .uniref import UniRef 3 | from .uniref_sam import UniRef_SAM 4 | from .data import build_detection_train_loader, build_detection_test_loader 5 | from .data.datasets.objects365 import categories 6 | from .data.datasets.objects365_v2 import categories 7 | from .backbone.swin import D2SwinTransformer 8 | from .backbone.convnext import D2ConvNeXt 9 | from .backbone.vit import D2ViT -------------------------------------------------------------------------------- /projects/UniRef/uniref/backbone/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/projects/UniRef/uniref/backbone/__init__.py -------------------------------------------------------------------------------- /projects/UniRef/uniref/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataset_mapper import CocoDatasetMapper 2 | from .coco_dataset_mapper import DetrDatasetMapper 3 | from .ytvis_dataset_mapper import YTVISDatasetMapper 4 | from .sam_dataset_mapper import SamDatasetMapper 5 | from .ytvis_sam_dataset_mapper import YTVISSamDatasetMapper 6 | 7 | from .mixup import MapDatasetMixup 8 | from .build import * 9 | from .datasets import * 10 | from .custom_dataset_dataloader import * 11 | 12 | from .ytvis_eval import YTVISEvaluator 13 | 14 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from . import builtin # ensure the builtin datasets are registered 2 | 3 | __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] 4 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------ 6 | # Modified from DETR (https://github.com/facebookresearch/detr) 7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 8 | # ------------------------------------------------------------------------ 9 | 10 | 11 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/functions/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------------------------------ 6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 7 | # ------------------------------------------------------------------------------------------------ 8 | 9 | from .ms_deform_attn_func import MSDeformAttnFunction 10 | 11 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ------------------------------------------------------------------------------------------------ 3 | # Deformable DETR 4 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | # ------------------------------------------------------------------------------------------------ 7 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | # ------------------------------------------------------------------------------------------------ 9 | 10 | python3 setup.py build install --user 11 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------------------------------ 6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 7 | # ------------------------------------------------------------------------------------------------ 8 | 9 | from .ms_deform_attn import MSDeformAttn 10 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/src/cpu/ms_deform_attn_cpu.cpp: -------------------------------------------------------------------------------- 1 | /*! 2 | ************************************************************************************************** 3 | * Deformable DETR 4 | * Copyright (c) 2020 SenseTime. All Rights Reserved. 
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | ************************************************************************************************** 7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | ************************************************************************************************** 9 | */ 10 | 11 | #include <vector> 12 | 13 | #include <ATen/ATen.h> 14 | #include <ATen/cuda/CUDAContext.h> 15 | 16 | 17 | at::Tensor 18 | ms_deform_attn_cpu_forward( 19 | const at::Tensor &value, 20 | const at::Tensor &spatial_shapes, 21 | const at::Tensor &level_start_index, 22 | const at::Tensor &sampling_loc, 23 | const at::Tensor &attn_weight, 24 | const int im2col_step) 25 | { 26 | AT_ERROR("Not implemented on CPU"); 27 | } 28 | 29 | std::vector<at::Tensor> 30 | ms_deform_attn_cpu_backward( 31 | const at::Tensor &value, 32 | const at::Tensor &spatial_shapes, 33 | const at::Tensor &level_start_index, 34 | const at::Tensor &sampling_loc, 35 | const at::Tensor &attn_weight, 36 | const at::Tensor &grad_output, 37 | const int im2col_step) 38 | { 39 | AT_ERROR("Not implemented on CPU"); 40 | } 41 | 42 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/src/cpu/ms_deform_attn_cpu.h: -------------------------------------------------------------------------------- 1 | /*! 2 | ************************************************************************************************** 3 | * Deformable DETR 4 | * Copyright (c) 2020 SenseTime. All Rights Reserved. 5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | ************************************************************************************************** 7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | ************************************************************************************************** 9 | */ 10 | 11 | #pragma once 12 | #include <torch/extension.h> 13 | 14 | at::Tensor 15 | ms_deform_attn_cpu_forward( 16 | const at::Tensor &value, 17 | const at::Tensor &spatial_shapes, 18 | const at::Tensor &level_start_index, 19 | const at::Tensor &sampling_loc, 20 | const at::Tensor &attn_weight, 21 | const int im2col_step); 22 | 23 | std::vector<at::Tensor> 24 | ms_deform_attn_cpu_backward( 25 | const at::Tensor &value, 26 | const at::Tensor &spatial_shapes, 27 | const at::Tensor &level_start_index, 28 | const at::Tensor &sampling_loc, 29 | const at::Tensor &attn_weight, 30 | const at::Tensor &grad_output, 31 | const int im2col_step); 32 | 33 | 34 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/src/cuda/ms_deform_attn_cuda.h: -------------------------------------------------------------------------------- 1 | /*! 2 | ************************************************************************************************** 3 | * Deformable DETR 4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | ************************************************************************************************** 7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | ************************************************************************************************** 9 | */ 10 | 11 | #pragma once 12 | #include <torch/extension.h> 13 | 14 | at::Tensor ms_deform_attn_cuda_forward( 15 | const at::Tensor &value, 16 | const at::Tensor &spatial_shapes, 17 | const at::Tensor &level_start_index, 18 | const at::Tensor &sampling_loc, 19 | const at::Tensor &attn_weight, 20 | const int im2col_step); 21 | 22 | std::vector<at::Tensor> ms_deform_attn_cuda_backward( 23 | const at::Tensor &value, 24 | const at::Tensor &spatial_shapes, 25 | const at::Tensor &level_start_index, 26 | const at::Tensor &sampling_loc, 27 | const at::Tensor &attn_weight, 28 | const at::Tensor &grad_output, 29 | const int im2col_step); 30 | 31 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/deformable_detr/ops/src/vision.cpp: -------------------------------------------------------------------------------- 1 | /*! 2 | ************************************************************************************************** 3 | * Deformable DETR 4 | * Copyright (c) 2020 SenseTime. All Rights Reserved. 5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | ************************************************************************************************** 7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | ************************************************************************************************** 9 | */ 10 | 11 | #include "ms_deform_attn.h" 12 | 13 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 14 | m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward"); 15 | m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward"); 16 | } 17 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/fuse_helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/projects/UniRef/uniref/models/fuse_helper/__init__.py -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/segment_anything/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .automatic_mask_generator import SamAutomaticMaskGenerator 8 | from .build_sam import (build_sam, build_sam_vit_b, build_sam_vit_h, 9 | build_sam_vit_l, sam_model_registry) 10 | from .predictor import SamPredictor 11 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/segment_anything/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved.
3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .image_encoder import ImageEncoderViT 8 | from .mask_decoder import MaskDecoder 9 | from .prompt_encoder import PromptEncoder 10 | from .sam import Sam 11 | from .transformer import TwoWayTransformer 12 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/segment_anything/modeling/common.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from typing import Type 8 | 9 | import torch 10 | import torch.nn as nn 11 | 12 | 13 | class MLPBlock(nn.Module): 14 | def __init__( 15 | self, 16 | embedding_dim: int, 17 | mlp_dim: int, 18 | act: Type[nn.Module] = nn.GELU, 19 | ) -> None: 20 | super().__init__() 21 | self.lin1 = nn.Linear(embedding_dim, mlp_dim) 22 | self.lin2 = nn.Linear(mlp_dim, embedding_dim) 23 | self.act = act() 24 | 25 | def forward(self, x: torch.Tensor) -> torch.Tensor: 26 | return self.lin2(self.act(self.lin1(x))) 27 | 28 | 29 | # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa 30 | # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa 31 | class LayerNorm2d(nn.Module): 32 | def __init__(self, num_channels: int, eps: float = 1e-6) -> None: 33 | super().__init__() 34 | self.weight = nn.Parameter(torch.ones(num_channels)) 35 | self.bias = nn.Parameter(torch.zeros(num_channels)) 36 | self.eps = eps 37 | 38 | def forward(self, x: torch.Tensor) -> torch.Tensor: 39 | u = x.mean(1, keepdim=True) 40 | s = (x - u).pow(2).mean(1, keepdim=True) 41 | x = (x - u) / torch.sqrt(s + self.eps) 42 | x = self.weight[:, None, None] * x + self.bias[:, None, None] 43 | return x 44 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/segment_anything/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /projects/UniRef/uniref/models/vos_helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/projects/UniRef/uniref/models/vos_helper/__init__.py -------------------------------------------------------------------------------- /projects/UniRef/uniref/util/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------ 6 | # Modified from DETR (https://github.com/facebookresearch/detr) 7 | # Copyright (c) Facebook, Inc. 
and its affiliates. All Rights Reserved 8 | # ------------------------------------------------------------------------ 9 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cython 2 | numpy==1.23.0 3 | pydantic==1.10.0 4 | scipy 5 | tqdm 6 | einops 7 | shapely 8 | pyarrow 9 | timm==0.9.2 10 | h5py 11 | submitit 12 | scikit-image 13 | progressbar2 14 | omegaconf 15 | motmetrics 16 | fairscale 17 | transformers==4.31.0 18 | sentencepiece 19 | accelerate==0.21.0 20 | bitsandbytes 21 | deepspeed==0.10.1 22 | peft==0.5.0 23 | 24 | 25 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | line_length=100 3 | multi_line_output=3 4 | include_trailing_comma=True 5 | known_standard_library=numpy,setuptools,mock 6 | skip=./datasets,docs 7 | skip_glob=*/__init__.py,**/configs/**,tests/config/** 8 | known_myself=detectron2 9 | known_third_party=fvcore,matplotlib,cv2,torch,torchvision,PIL,pycocotools,yacs,termcolor,cityscapesscripts,tabulate,tqdm,scipy,lvis,psutil,pkg_resources,caffe2,onnx,panopticapi,black,isort,av,iopath,omegaconf,hydra,yaml,pydoc,submitit,cloudpickle 10 | no_lines_before=STDLIB,THIRDPARTY 11 | sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER 12 | default_section=FIRSTPARTY 13 | 14 | [mypy] 15 | python_version=3.6 16 | ignore_missing_imports = True 17 | warn_unused_configs = True 18 | disallow_untyped_defs = True 19 | check_untyped_defs = True 20 | warn_unused_ignores = True 21 | warn_redundant_casts = True 22 | show_column_numbers = True 23 | follow_imports = silent 24 | allow_redefinition = True 25 | ; Require all functions to be annotated 26 | disallow_incomplete_defs = True 27 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | ## Unit Tests 2 | 3 | To run the unittests, do: 4 | ``` 5 | cd detectron2 6 | python -m unittest discover -v -s ./tests 7 | ``` 8 | 9 | There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev). 10 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | -------------------------------------------------------------------------------- /tests/config/dir1/dir1_a.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | dir1a_str = "base_a_1" 3 | dir1a_dict = {"a": 1, "b": 2} 4 | -------------------------------------------------------------------------------- /tests/config/dir1/dir1_b.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
from detectron2.config import LazyConfig

# equivalent to relative import
dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))

dir1b_str = dir1a_str + "_from_b"
dir1b_dict = dir1a_dict

# Every import is a reload: not modified by other config files
assert dir1a_dict.a == 1
--------------------------------------------------------------------------------
/tests/config/root_cfg.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
from itertools import count

from detectron2.config import LazyCall as L

from .dir1.dir1_a import dir1a_dict, dir1a_str

dir1a_dict.a = "modified"

# modification above won't affect future imports
from .dir1.dir1_b import dir1b_dict, dir1b_str


lazyobj = L(count)(x=dir1a_str, y=dir1b_str)
--------------------------------------------------------------------------------
/tests/data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tests/data/__init__.py
--------------------------------------------------------------------------------
/tests/layers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tests/layers/__init__.py
--------------------------------------------------------------------------------
/tests/layers/test_nms.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch

from detectron2.layers import batched_nms
from detectron2.utils.testing import random_boxes


class TestNMS(unittest.TestCase):
    def _create_tensors(self, N):
        boxes = random_boxes(N, 200)
        scores = torch.rand(N)
        return boxes, scores

    def test_nms_scriptability(self):
        N = 2000
        num_classes = 50
        boxes, scores = self._create_tensors(N)
        idxs = torch.randint(0, num_classes, (N,))
        scripted_batched_nms = torch.jit.script(batched_nms)
        err_msg = "NMS is incompatible with jit-scripted NMS for IoU={}"

        for iou in [0.2, 0.5, 0.8]:
            keep_ref = batched_nms(boxes, scores, idxs, iou)
            backup = boxes.clone()
            scripted_keep = scripted_batched_nms(boxes, scores, idxs, iou)
            assert torch.allclose(boxes, backup), "boxes modified by jit-scripted batched_nms"
            self.assertTrue(torch.equal(keep_ref, scripted_keep), err_msg.format(iou))


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/modeling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tests/modeling/__init__.py
--------------------------------------------------------------------------------
/tests/modeling/test_backbone.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import unittest
import torch

import detectron2.export.torchscript  # apply patch # noqa
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import build_resnet_backbone
from detectron2.modeling.backbone.fpn import build_resnet_fpn_backbone


class TestBackBone(unittest.TestCase):
    def test_resnet_scriptability(self):
        cfg = get_cfg()
        resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3))

        scripted_resnet = torch.jit.script(resnet)

        inp = torch.rand(2, 3, 100, 100)
        out1 = resnet(inp)["res4"]
        out2 = scripted_resnet(inp)["res4"]
        self.assertTrue(torch.allclose(out1, out2))

    def test_fpn_scriptability(self):
        cfg = model_zoo.get_config("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
        bb = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
        bb_s = torch.jit.script(bb)

        inp = torch.rand(2, 3, 128, 128)
        out1 = bb(inp)["p5"]
        out2 = bb_s(inp)["p5"]
        self.assertTrue(torch.allclose(out1, out2))
--------------------------------------------------------------------------------
/tests/modeling/test_matcher.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List
import torch

from detectron2.config import get_cfg
from detectron2.modeling.matcher import Matcher


class TestMatcher(unittest.TestCase):
    def test_scriptability(self):
        cfg = get_cfg()
        anchor_matcher = Matcher(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
        )
        match_quality_matrix = torch.tensor(
            [[0.15, 0.45, 0.2, 0.6], [0.3, 0.65, 0.05, 0.1], [0.05, 0.4, 0.25, 0.4]]
        )
        expected_matches = torch.tensor([1, 1, 2, 0])
        expected_match_labels = torch.tensor([-1, 1, 0, 1], dtype=torch.int8)

        matches, match_labels = anchor_matcher(match_quality_matrix)
        self.assertTrue(torch.allclose(matches, expected_matches))
        self.assertTrue(torch.allclose(match_labels, expected_match_labels))

        # nonzero_tuple must be imported explicitly to let jit know what it is.
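        # (scripting Matcher resolves the helper by name, so the symbol has to be
        # visible in this module; see the upstream issue linked below.)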
        # https://github.com/pytorch/pytorch/issues/38964
        from detectron2.layers import nonzero_tuple  # noqa F401

        def f(thresholds: List[float], labels: List[int]):
            return Matcher(thresholds, labels, allow_low_quality_matches=True)

        scripted_anchor_matcher = torch.jit.script(f)(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS
        )
        matches, match_labels = scripted_anchor_matcher(match_quality_matrix)
        self.assertTrue(torch.allclose(matches, expected_matches))
        self.assertTrue(torch.allclose(match_labels, expected_match_labels))


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/structures/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tests/structures/__init__.py
--------------------------------------------------------------------------------
/tests/structures/test_keypoints.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch

from detectron2.structures.keypoints import Keypoints


class TestKeypoints(unittest.TestCase):
    def test_cat_keypoints(self):
        keypoints1 = Keypoints(torch.rand(2, 21, 3))
        keypoints2 = Keypoints(torch.rand(4, 21, 3))

        cat_keypoints = keypoints1.cat([keypoints1, keypoints2])
        self.assertTrue(torch.all(cat_keypoints.tensor[:2] == keypoints1.tensor).item())
        self.assertTrue(torch.all(cat_keypoints.tensor[2:] == keypoints2.tensor).item())


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/test_checkpoint.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
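# Exercises align_and_update_state_dicts, which matches checkpoint keys to model keys
# heuristically (by common suffix), so the flat "layer1.*" keys below can load into
# the nested "block1.layer1.*" parameters.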
import unittest
from collections import OrderedDict
import torch
from torch import nn

from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.utils.logger import setup_logger


class TestCheckpointer(unittest.TestCase):
    def setUp(self):
        setup_logger()

    def create_complex_model(self):
        m = nn.Module()
        m.block1 = nn.Module()
        m.block1.layer1 = nn.Linear(2, 3)
        m.layer2 = nn.Linear(3, 2)
        m.res = nn.Module()
        m.res.layer2 = nn.Linear(3, 2)

        state_dict = OrderedDict()
        state_dict["layer1.weight"] = torch.rand(3, 2)
        state_dict["layer1.bias"] = torch.rand(3)
        state_dict["layer2.weight"] = torch.rand(2, 3)
        state_dict["layer2.bias"] = torch.rand(2)
        state_dict["res.layer2.weight"] = torch.rand(2, 3)
        state_dict["res.layer2.bias"] = torch.rand(2)
        return m, state_dict

    def test_complex_model_loaded(self):
        for add_data_parallel in [False, True]:
            model, state_dict = self.create_complex_model()
            if add_data_parallel:
                model = nn.DataParallel(model)
            model_sd = model.state_dict()

            sd_to_load = align_and_update_state_dicts(model_sd, state_dict)
            model.load_state_dict(sd_to_load)
            for loaded, stored in zip(model_sd.values(), state_dict.values()):
                # different tensor references
                self.assertFalse(id(loaded) == id(stored))
                # same content
                self.assertTrue(loaded.to(stored).equal(stored))


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/test_model_zoo.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
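# Covers both config flavors in the model zoo: yacs yaml configs (get, get_checkpoint_url)
# and python LazyConfigs (get_config + instantiate).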
import logging
import unittest

from detectron2 import model_zoo
from detectron2.config import instantiate
from detectron2.modeling import FPN, GeneralizedRCNN

logger = logging.getLogger(__name__)


class TestModelZoo(unittest.TestCase):
    def test_get_returns_model(self):
        model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False)
        self.assertIsInstance(model, GeneralizedRCNN)
        self.assertIsInstance(model.backbone, FPN)

    def test_get_invalid_model(self):
        self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml")

    def test_get_url(self):
        url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
        self.assertEqual(
            url,
            "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl",  # noqa
        )
        url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py")
        self.assertEqual(url, url2)

    def _build_lazy_model(self, name):
        cfg = model_zoo.get_config("common/models/" + name)
        instantiate(cfg.model)

    def test_mask_rcnn_fpn(self):
        self._build_lazy_model("mask_rcnn_fpn.py")

    def test_mask_rcnn_c4(self):
        self._build_lazy_model("mask_rcnn_c4.py")

    def test_panoptic_fpn(self):
        self._build_lazy_model("panoptic_fpn.py")

    def test_schedule(self):
        cfg = model_zoo.get_config("common/coco_schedule.py")
        for _, v in cfg.items():
            instantiate(v)


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/tests/test_packaging.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest

from detectron2.utils.collect_env import collect_env_info


class TestProjects(unittest.TestCase):
    def test_import(self):
        from detectron2.projects import point_rend

        _ = point_rend.add_pointrend_config

        import detectron2.projects.deeplab as deeplab

        _ = deeplab.add_deeplab_config

        # import detectron2.projects.panoptic_deeplab as panoptic_deeplab

        # _ = panoptic_deeplab.add_panoptic_deeplab_config


class TestCollectEnv(unittest.TestCase):
    def test(self):
        _ = collect_env_info()
--------------------------------------------------------------------------------
/tests/test_registry.py:
--------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
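# locate() resolves a dotted name back to the object it names; _convert_target_to_string()
# is its inverse and relies on __qualname__ so that nested classes round-trip.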
import unittest
import torch

from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.utils.registry import _convert_target_to_string, locate


class A:
    class B:
        pass


class TestLocate(unittest.TestCase):
    def _test_obj(self, obj):
        name = _convert_target_to_string(obj)
        newobj = locate(name)
        self.assertIs(obj, newobj)

    def test_basic(self):
        self._test_obj(GeneralizedRCNN)

    def test_inside_class(self):
        # requires using __qualname__ instead of __name__
        self._test_obj(A.B)

    def test_builtin(self):
        self._test_obj(len)
        self._test_obj(dict)

    def test_pytorch_optim(self):
        # pydoc.locate does not work for it
        self._test_obj(torch.optim.SGD)

    def test_failure(self):
        with self.assertRaises(ImportError):
            locate("asdf")

    def test_compress_target(self):
        from detectron2.data.transforms import RandomCrop

        name = _convert_target_to_string(RandomCrop)
        # name shouldn't contain 'augmentation_impl'
        self.assertEqual(name, "detectron2.data.transforms.RandomCrop")
        self.assertIs(RandomCrop, locate(name))
--------------------------------------------------------------------------------
/tests/tracking/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tests/tracking/__init__.py
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------

This directory contains a few example scripts that demonstrate features of detectron2.


* `train_net.py`

An example training script that's made to train builtin models of detectron2.

For usage, see [GETTING_STARTED.md](../GETTING_STARTED.md).

* `plain_train_net.py`

Similar to `train_net.py`, but implements a training loop instead of using `Trainer`.
This script includes fewer features, but it may be friendlier to hackers.

* `benchmark.py`

Benchmark the training speed, inference speed, or data loading speed of a given config.

Usage:
```
python benchmark.py --config-file config.yaml --task train/eval/data [optional DDP flags]
```

* `analyze_model.py`

Analyze FLOPs, parameters, and activations of a detectron2 model. See its `--help` for usage.

* `visualize_json_results.py`

Visualize the JSON instance detection/segmentation results dumped by `COCOEvaluator` or `LVISEvaluator`.

Usage:
```
python visualize_json_results.py --input x.json --output dir/ --dataset coco_2017_val
```
If you are not using a builtin dataset, you'll need to write your own script or modify this one.

* `visualize_data.py`

Visualize ground truth raw annotations or training data (after preprocessing/augmentations).

Usage:
```
python visualize_data.py --config-file config.yaml --source annotation/dataloader --output-dir dir/ [--show]
```

NOTE: the script does not stop by itself when using `--source dataloader` because a training
dataloader is usually infinite.
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FoundationVision/UniRef/16442cfcf3e034d6cefedda659d41eca69137d0b/tools/__init__.py
--------------------------------------------------------------------------------
/tools/convert-torchvision-to-d2.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.

import pickle as pkl
import sys
import torch

"""
Usage:
  # download one of the ResNet{18,34,50,101,152} models from torchvision:
  wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
  # run the conversion
  ./convert-torchvision-to-d2.py r50.pth r50.pkl

  # Then, use r50.pkl with the following changes in config:

MODEL:
  WEIGHTS: "/path/to/r50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  RESNETS:
    DEPTH: 50
    STRIDE_IN_1X1: False
INPUT:
  FORMAT: "RGB"

These models typically produce slightly worse results than the
pre-trained ResNets we use in official configs, which are the
original ResNet models released by MSRA.
"""

if __name__ == "__main__":
    input = sys.argv[1]

    obj = torch.load(input, map_location="cpu")

    newmodel = {}
    for k in list(obj.keys()):
        old_k = k
        # map torchvision names to detectron2 names:
        #   conv1/bn1 (no "layer" prefix) -> stem.*, layerN -> res(N+1),
        #   bnN -> convN.norm, downsample.0/.1 -> shortcut/shortcut.norm
        if "layer" not in k:
            k = "stem." + k
        for t in [1, 2, 3, 4]:
            k = k.replace("layer{}".format(t), "res{}".format(t + 1))
        for t in [1, 2, 3]:
            k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
        k = k.replace("downsample.0", "shortcut")
        k = k.replace("downsample.1", "shortcut.norm")
        print(old_k, "->", k)
        newmodel[k] = obj.pop(old_k).detach().numpy()

    res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}

    with open(sys.argv[2], "wb") as f:
        pkl.dump(res, f)
    if obj:
        print("Unconverted keys:", obj.keys())
--------------------------------------------------------------------------------
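A quick way to sanity-check the converted checkpoint (a minimal sketch, not part of the repo; it only assumes the `r50.pkl` written by the script above):

```
import pickle

with open("r50.pkl", "rb") as f:
    ckpt = pickle.load(f)

# The script stores numpy arrays under "model", plus provenance metadata.
assert ckpt["__author__"] == "torchvision"
assert ckpt["matching_heuristics"] is True
print(len(ckpt["model"]), "arrays; sample keys:", sorted(ckpt["model"])[:3])
```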