├── detectron2 ├── model_zoo │ ├── configs │ └── __init__.py ├── utils │ ├── __init__.py │ ├── __pycache__ │ │ ├── env.cpython-36.pyc │ │ ├── env.cpython-37.pyc │ │ ├── comm.cpython-36.pyc │ │ ├── comm.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── colormap.cpython-36.pyc │ │ ├── events.cpython-36.pyc │ │ ├── events.cpython-37.pyc │ │ ├── file_io.cpython-36.pyc │ │ ├── file_io.cpython-37.pyc │ │ ├── logger.cpython-36.pyc │ │ ├── logger.cpython-37.pyc │ │ ├── memory.cpython-36.pyc │ │ ├── memory.cpython-37.pyc │ │ ├── registry.cpython-36.pyc │ │ ├── registry.cpython-37.pyc │ │ ├── serialize.cpython-36.pyc │ │ ├── serialize.cpython-37.pyc │ │ ├── visualizer.cpython-36.pyc │ │ ├── collect_env.cpython-36.pyc │ │ └── collect_env.cpython-37.pyc │ ├── README.md │ ├── serialize.py │ ├── file_io.py │ ├── registry.py │ └── memory.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ └── __init__.cpython-37.pyc ├── projects │ ├── README.md │ └── __init__.py ├── config │ ├── __pycache__ │ │ ├── lazy.cpython-36.pyc │ │ ├── lazy.cpython-37.pyc │ │ ├── compat.cpython-36.pyc │ │ ├── compat.cpython-37.pyc │ │ ├── config.cpython-36.pyc │ │ ├── config.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── defaults.cpython-36.pyc │ │ ├── defaults.cpython-37.pyc │ │ ├── instantiate.cpython-36.pyc │ │ ├── instantiate.cpython-37.pyc │ │ └── ttda_config.cpython-36.pyc │ ├── __init__.py │ └── instantiate.py ├── data │ ├── __pycache__ │ │ ├── build.cpython-36.pyc │ │ ├── common.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── catalog.cpython-36.pyc │ │ ├── dataset_mapper.cpython-36.pyc │ │ └── detection_utils.cpython-36.pyc │ ├── datasets │ │ ├── __pycache__ │ │ │ ├── coco.cpython-36.pyc │ │ │ ├── coco.cpython-37.pyc │ │ │ ├── foggy.cpython-36.pyc │ │ │ ├── foggy.cpython-37.pyc │ │ │ ├── kitti.cpython-36.pyc │ │ │ ├── kitti.cpython-37.pyc │ │ │ ├── lvis.cpython-36.pyc │ │ │ ├── lvis.cpython-37.pyc │ │ │ ├── 
builtin.cpython-36.pyc │ │ │ ├── builtin.cpython-37.pyc │ │ │ ├── clipart.cpython-36.pyc │ │ │ ├── clipart.cpython-37.pyc │ │ │ ├── sim10k.cpython-36.pyc │ │ │ ├── sim10k.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── cityscape.cpython-36.pyc │ │ │ ├── cityscape.cpython-37.pyc │ │ │ ├── cityscapes.cpython-36.pyc │ │ │ ├── pascal_voc.cpython-36.pyc │ │ │ ├── pascal_voc.cpython-37.pyc │ │ │ ├── watercolor.cpython-36.pyc │ │ │ ├── watercolor.cpython-37.pyc │ │ │ ├── builtin_meta.cpython-36.pyc │ │ │ ├── builtin_meta.cpython-37.pyc │ │ │ ├── cityscape_car.cpython-36.pyc │ │ │ ├── cityscape_car.cpython-37.pyc │ │ │ ├── cityscapes_org.cpython-36.pyc │ │ │ ├── cityscapes_org.cpython-37.pyc │ │ │ ├── coco_panoptic.cpython-36.pyc │ │ │ ├── coco_panoptic.cpython-37.pyc │ │ │ ├── cityscapes_panoptic.cpython-36.pyc │ │ │ ├── cityscapes_panoptic.cpython-37.pyc │ │ │ ├── lvis_v0_5_categories.cpython-36.pyc │ │ │ ├── lvis_v0_5_categories.cpython-37.pyc │ │ │ ├── lvis_v1_categories.cpython-36.pyc │ │ │ └── lvis_v1_categories.cpython-37.pyc │ │ ├── register_coco.py │ │ ├── README.md │ │ ├── __init__.py │ │ ├── foggy.py │ │ ├── kitti.py │ │ ├── sim10k.py │ │ ├── cityscape.py │ │ ├── cityscape_car.py │ │ ├── clipart.py │ │ ├── pascal_voc.py │ │ └── watercolor.py │ ├── samplers │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── distributed_sampler.cpython-36.pyc │ │ │ ├── distributed_sampler.cpython-37.pyc │ │ │ ├── grouped_batch_sampler.cpython-36.pyc │ │ │ └── grouped_batch_sampler.cpython-37.pyc │ │ ├── __init__.py │ │ └── grouped_batch_sampler.py │ ├── transforms │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── transform.cpython-36.pyc │ │ │ ├── transform.cpython-37.pyc │ │ │ ├── augmentation.cpython-36.pyc │ │ │ ├── augmentation.cpython-37.pyc │ │ │ ├── augmentation_impl.cpython-36.pyc │ │ │ └── augmentation_impl.cpython-37.pyc │ │ └── 
__init__.py │ └── __init__.py ├── layers │ ├── __pycache__ │ │ ├── aspp.cpython-36.pyc │ │ ├── aspp.cpython-37.pyc │ │ ├── nms.cpython-36.pyc │ │ ├── nms.cpython-37.pyc │ │ ├── blocks.cpython-36.pyc │ │ ├── blocks.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── mask_ops.cpython-36.pyc │ │ ├── mask_ops.cpython-37.pyc │ │ ├── roi_align.cpython-36.pyc │ │ ├── roi_align.cpython-37.pyc │ │ ├── wrappers.cpython-36.pyc │ │ ├── wrappers.cpython-37.pyc │ │ ├── batch_norm.cpython-36.pyc │ │ ├── batch_norm.cpython-37.pyc │ │ ├── deform_conv.cpython-36.pyc │ │ ├── deform_conv.cpython-37.pyc │ │ ├── shape_spec.cpython-36.pyc │ │ ├── shape_spec.cpython-37.pyc │ │ ├── rotated_boxes.cpython-36.pyc │ │ ├── rotated_boxes.cpython-37.pyc │ │ ├── roi_align_rotated.cpython-36.pyc │ │ └── roi_align_rotated.cpython-37.pyc │ ├── csrc │ │ ├── README.md │ │ ├── cuda_version.cu │ │ ├── box_iou_rotated │ │ │ ├── box_iou_rotated.h │ │ │ └── box_iou_rotated_cpu.cpp │ │ ├── nms_rotated │ │ │ ├── nms_rotated.h │ │ │ └── nms_rotated_cpu.cpp │ │ ├── ROIAlignRotated │ │ │ └── ROIAlignRotated.h │ │ ├── cocoeval │ │ │ └── cocoeval.h │ │ └── vision.cpp │ ├── rotated_boxes.py │ ├── shape_spec.py │ ├── __init__.py │ ├── roi_align.py │ ├── blocks.py │ └── roi_align_rotated.py ├── engine │ ├── __pycache__ │ │ ├── hooks.cpython-36.pyc │ │ ├── hooks.cpython-37.pyc │ │ ├── launch.cpython-36.pyc │ │ ├── launch.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── defaults.cpython-36.pyc │ │ ├── defaults.cpython-37.pyc │ │ ├── train_loop.cpython-36.pyc │ │ └── train_loop.cpython-37.pyc │ └── __init__.py ├── solver │ ├── __pycache__ │ │ ├── build.cpython-36.pyc │ │ ├── build.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── lr_scheduler.cpython-36.pyc │ │ └── lr_scheduler.cpython-37.pyc │ └── __init__.py ├── modeling │ ├── __pycache__ │ │ ├── matcher.cpython-36.pyc │ │ ├── matcher.cpython-37.pyc │ │ ├── 
poolers.cpython-36.pyc │ │ ├── poolers.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── sampling.cpython-36.pyc │ │ ├── sampling.cpython-37.pyc │ │ ├── box_regression.cpython-36.pyc │ │ ├── box_regression.cpython-37.pyc │ │ ├── mmdet_wrapper.cpython-36.pyc │ │ ├── mmdet_wrapper.cpython-37.pyc │ │ ├── postprocessing.cpython-36.pyc │ │ ├── postprocessing.cpython-37.pyc │ │ ├── anchor_generator.cpython-36.pyc │ │ ├── anchor_generator.cpython-37.pyc │ │ ├── test_time_augmentation.cpython-36.pyc │ │ └── test_time_augmentation.cpython-37.pyc │ ├── backbone │ │ ├── __pycache__ │ │ │ ├── fpn.cpython-36.pyc │ │ │ ├── fpn.cpython-37.pyc │ │ │ ├── build.cpython-36.pyc │ │ │ ├── build.cpython-37.pyc │ │ │ ├── regnet.cpython-36.pyc │ │ │ ├── regnet.cpython-37.pyc │ │ │ ├── resnet.cpython-36.pyc │ │ │ ├── resnet.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── backbone.cpython-36.pyc │ │ │ └── backbone.cpython-37.pyc │ │ ├── __init__.py │ │ ├── build.py │ │ └── backbone.py │ ├── meta_arch │ │ ├── __pycache__ │ │ │ ├── build.cpython-36.pyc │ │ │ ├── build.cpython-37.pyc │ │ │ ├── rcnn.cpython-36.pyc │ │ │ ├── rcnn.cpython-37.pyc │ │ │ ├── losses.cpython-36.pyc │ │ │ ├── losses.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── retinanet.cpython-36.pyc │ │ │ ├── retinanet.cpython-37.pyc │ │ │ ├── ttda_rcnn.cpython-36.pyc │ │ │ ├── ttda_rcnn.cpython-37.pyc │ │ │ ├── memory_bank.cpython-36.pyc │ │ │ ├── memory_bank.cpython-37.pyc │ │ │ ├── panoptic_fpn.cpython-36.pyc │ │ │ ├── panoptic_fpn.cpython-37.pyc │ │ │ ├── semantic_seg.cpython-36.pyc │ │ │ ├── semantic_seg.cpython-37.pyc │ │ │ ├── st_ttda_rcnn.cpython-36.pyc │ │ │ ├── ts_ensemble.cpython-36.pyc │ │ │ ├── student_ttda_rcnn.cpython-36.pyc │ │ │ ├── student_ttda_rcnn.cpython-37.pyc │ │ │ ├── teacher_ttda_rcnn.cpython-36.pyc │ │ │ ├── teacher_ttda_rcnn.cpython-37.pyc │ │ │ ├── 
student_ttda_mem_rcnn.cpython-36.pyc │ │ │ ├── student_ttda_mem_rcnn.cpython-37.pyc │ │ │ ├── teacher_ttda_mem_rcnn.cpython-36.pyc │ │ │ └── teacher_ttda_mem_rcnn.cpython-37.pyc │ │ ├── __init__.py │ │ ├── ts_ensemble.py │ │ ├── build.py │ │ └── losses.py │ ├── roi_heads │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── box_head.cpython-36.pyc │ │ │ ├── box_head.cpython-37.pyc │ │ │ ├── fast_rcnn.cpython-36.pyc │ │ │ ├── fast_rcnn.cpython-37.pyc │ │ │ ├── mask_head.cpython-36.pyc │ │ │ ├── mask_head.cpython-37.pyc │ │ │ ├── roi_heads.cpython-36.pyc │ │ │ ├── roi_heads.cpython-37.pyc │ │ │ ├── cascade_rcnn.cpython-36.pyc │ │ │ ├── cascade_rcnn.cpython-37.pyc │ │ │ ├── keypoint_head.cpython-36.pyc │ │ │ ├── keypoint_head.cpython-37.pyc │ │ │ ├── rotated_fast_rcnn.cpython-36.pyc │ │ │ └── rotated_fast_rcnn.cpython-37.pyc │ │ └── __init__.py │ ├── proposal_generator │ │ ├── __pycache__ │ │ │ ├── rpn.cpython-36.pyc │ │ │ ├── rpn.cpython-37.pyc │ │ │ ├── build.cpython-36.pyc │ │ │ ├── build.cpython-37.pyc │ │ │ ├── rrpn.cpython-36.pyc │ │ │ ├── rrpn.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── proposal_utils.cpython-36.pyc │ │ │ └── proposal_utils.cpython-37.pyc │ │ ├── __init__.py │ │ └── build.py │ ├── __init__.py │ └── sampling.py ├── structures │ ├── __pycache__ │ │ ├── boxes.cpython-36.pyc │ │ ├── boxes.cpython-37.pyc │ │ ├── masks.cpython-36.pyc │ │ ├── masks.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── image_list.cpython-36.pyc │ │ ├── image_list.cpython-37.pyc │ │ ├── instances.cpython-36.pyc │ │ ├── instances.cpython-37.pyc │ │ ├── keypoints.cpython-36.pyc │ │ ├── keypoints.cpython-37.pyc │ │ ├── rotated_boxes.cpython-36.pyc │ │ └── rotated_boxes.cpython-37.pyc │ └── __init__.py ├── checkpoint │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── catalog.cpython-36.pyc │ │ ├── 
catalog.cpython-37.pyc │ │ ├── c2_model_loading.cpython-36.pyc │ │ ├── c2_model_loading.cpython-37.pyc │ │ ├── detection_checkpoint.cpython-36.pyc │ │ └── detection_checkpoint.cpython-37.pyc │ └── __init__.py ├── evaluation │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── testing.cpython-36.pyc │ │ ├── testing.cpython-37.pyc │ │ ├── evaluator.cpython-36.pyc │ │ ├── evaluator.cpython-37.pyc │ │ ├── fast_eval_api.cpython-36.pyc │ │ ├── fast_eval_api.cpython-37.pyc │ │ ├── coco_evaluation.cpython-36.pyc │ │ ├── coco_evaluation.cpython-37.pyc │ │ ├── lvis_evaluation.cpython-36.pyc │ │ ├── lvis_evaluation.cpython-37.pyc │ │ ├── clipart_evaluation.cpython-36.pyc │ │ ├── clipart_evaluation.cpython-37.pyc │ │ ├── foggy_evaluation.cpython-36.pyc │ │ ├── foggy_evaluation.cpython-37.pyc │ │ ├── sem_seg_evaluation.cpython-36.pyc │ │ ├── sem_seg_evaluation.cpython-37.pyc │ │ ├── sim10k_evaluation.cpython-36.pyc │ │ ├── sim10k_evaluation.cpython-37.pyc │ │ ├── cityscape_evaluation.cpython-36.pyc │ │ ├── cityscape_evaluation.cpython-37.pyc │ │ ├── panoptic_evaluation.cpython-36.pyc │ │ ├── panoptic_evaluation.cpython-37.pyc │ │ ├── cityscapes_evaluation.cpython-36.pyc │ │ ├── cityscapes_evaluation.cpython-37.pyc │ │ ├── pascal_voc_evaluation.cpython-36.pyc │ │ ├── pascal_voc_evaluation.cpython-37.pyc │ │ ├── rotated_coco_evaluation.cpython-36.pyc │ │ ├── rotated_coco_evaluation.cpython-37.pyc │ │ ├── watercolor_evaluation.cpython-36.pyc │ │ ├── watercolor_evaluation.cpython-37.pyc │ │ ├── cityscape_car_evaluation.cpython-36.pyc │ │ └── cityscape_car_evaluation.cpython-37.pyc │ ├── __init__.py │ └── testing.py ├── export │ ├── __init__.py │ └── README.md └── __init__.py ├── imgs └── Archi.png ├── dev ├── README.md ├── packaging │ ├── README.md │ ├── build_wheel.sh │ ├── gen_wheel_index.sh │ ├── build_all_wheels.sh │ ├── gen_install_table.py │ └── pkg_helpers.bash ├── run_instant_tests.sh ├── linter.sh ├── run_inference_tests.sh └── 
parse_results.sh ├── reference.bib ├── configs ├── Base-RCNN-C4.yaml ├── online_da │ ├── foggy_baseline.yaml │ ├── onda_foggy_mem.yaml │ ├── onda_sim_mem.yaml │ └── onda_kitti_mem.yaml ├── Base-RCNN-DilatedC5.yaml ├── Base-RetinaNet.yaml └── Base-RCNN-FPN.yaml ├── dataset ├── prepare_for_tests.sh └── prepare_ade20k_sem_seg.py ├── tools └── deploy │ ├── CMakeLists.txt │ └── README.md ├── setup.cfg └── requirements.txt /detectron2/model_zoo/configs: -------------------------------------------------------------------------------- 1 | /media/vibsss/test_time/Github/online_da/configs -------------------------------------------------------------------------------- /detectron2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | -------------------------------------------------------------------------------- /imgs/Archi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/imgs/Archi.png -------------------------------------------------------------------------------- /detectron2/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/projects/README.md: -------------------------------------------------------------------------------- 1 | 2 | Projects live in the [`projects` directory](../../projects) under the root 
of this repository, but not here. 3 | -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/env.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/env.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/env.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/env.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/lazy.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/lazy.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/lazy.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/lazy.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/__pycache__/build.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/build.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/__pycache__/common.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/common.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/layers/__pycache__/aspp.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/aspp.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/aspp.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/aspp.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/nms.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/nms.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/nms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/nms.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/comm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/comm.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/comm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/comm.cpython-37.pyc 
-------------------------------------------------------------------------------- /detectron2/config/__pycache__/compat.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/compat.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/compat.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/compat.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/__pycache__/catalog.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/catalog.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/engine/__pycache__/hooks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/hooks.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/hooks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/hooks.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/launch.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/launch.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/launch.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/launch.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/blocks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/blocks.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/blocks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/blocks.cpython-37.pyc 
-------------------------------------------------------------------------------- /detectron2/solver/__pycache__/build.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/build.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/solver/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/build.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/colormap.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/colormap.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/events.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/events.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/utils/__pycache__/events.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/events.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/file_io.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/file_io.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/file_io.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/file_io.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/logger.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/logger.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/logger.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/logger.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/memory.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/memory.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/utils/__pycache__/memory.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/memory.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/registry.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/registry.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/registry.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/registry.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/defaults.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/defaults.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/config/__pycache__/defaults.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/defaults.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/defaults.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/defaults.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/defaults.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/defaults.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/__init__.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/layers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/mask_ops.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/mask_ops.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/mask_ops.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/mask_ops.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/roi_align.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/roi_align.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/roi_align.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/roi_align.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/wrappers.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/wrappers.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/layers/__pycache__/wrappers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/wrappers.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/matcher.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/matcher.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/matcher.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/matcher.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/poolers.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/poolers.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/poolers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/poolers.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/solver/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/__init__.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/solver/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/boxes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/boxes.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/boxes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/boxes.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/masks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/masks.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/masks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/masks.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/serialize.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/serialize.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/utils/__pycache__/serialize.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/serialize.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/visualizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/visualizer.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/catalog.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/catalog.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/catalog.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/catalog.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/instantiate.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/instantiate.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/instantiate.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/instantiate.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/config/__pycache__/ttda_config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/config/__pycache__/ttda_config.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/__pycache__/dataset_mapper.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/dataset_mapper.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/coco.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/coco.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/coco.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/coco.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/foggy.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/foggy.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/foggy.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/foggy.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/kitti.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/kitti.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/kitti.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/kitti.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/lvis.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis.cpython-36.pyc -------------------------------------------------------------------------------- 
/detectron2/data/datasets/__pycache__/lvis.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/train_loop.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/train_loop.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/engine/__pycache__/train_loop.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/engine/__pycache__/train_loop.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/testing.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/testing.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/testing.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/testing.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/batch_norm.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/batch_norm.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/batch_norm.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/batch_norm.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/deform_conv.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/deform_conv.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/deform_conv.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/deform_conv.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/shape_spec.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/shape_spec.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/shape_spec.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/shape_spec.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/sampling.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/sampling.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/sampling.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/sampling.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/solver/__pycache__/lr_scheduler.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/lr_scheduler.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/solver/__pycache__/lr_scheduler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/solver/__pycache__/lr_scheduler.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/collect_env.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/collect_env.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/utils/__pycache__/collect_env.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/utils/__pycache__/collect_env.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/data/__pycache__/detection_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/__pycache__/detection_utils.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/builtin.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/builtin.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/builtin.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/builtin.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/clipart.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/clipart.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/clipart.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/clipart.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/sim10k.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/sim10k.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/sim10k.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/sim10k.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/evaluator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/evaluator.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/evaluator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/evaluator.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/rotated_boxes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/rotated_boxes.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/rotated_boxes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/rotated_boxes.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/fpn.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/fpn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/fpn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/fpn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/image_list.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/image_list.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/image_list.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/image_list.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/instances.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/instances.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/instances.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/instances.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/keypoints.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/keypoints.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/keypoints.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/keypoints.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscape.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscape.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscape.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscape.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/data/datasets/__pycache__/cityscapes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscapes.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/pascal_voc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/pascal_voc.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/pascal_voc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/pascal_voc.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/watercolor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/watercolor.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/watercolor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/watercolor.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/fast_eval_api.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/fast_eval_api.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/fast_eval_api.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/fast_eval_api.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/layers/__pycache__/roi_align_rotated.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/roi_align_rotated.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/layers/__pycache__/roi_align_rotated.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/layers/__pycache__/roi_align_rotated.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/box_regression.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/box_regression.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/box_regression.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/box_regression.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/mmdet_wrapper.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/mmdet_wrapper.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/mmdet_wrapper.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/mmdet_wrapper.cpython-37.pyc 
-------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/postprocessing.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/postprocessing.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/postprocessing.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/postprocessing.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/build.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/build.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/build.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/regnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/regnet.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/regnet.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/regnet.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/resnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/resnet.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/resnet.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/build.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/build.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/build.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/rcnn.cpython-36.pyc -------------------------------------------------------------------------------- 
/detectron2/modeling/meta_arch/__pycache__/rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/rotated_boxes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/rotated_boxes.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/structures/__pycache__/rotated_boxes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/structures/__pycache__/rotated_boxes.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/builtin_meta.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/builtin_meta.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/builtin_meta.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/builtin_meta.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/transform.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/transform.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/transform.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/transform.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/coco_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/coco_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/coco_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/coco_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/lvis_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/lvis_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/lvis_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/lvis_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/layers/csrc/README.md: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | To add a new Op: 4 | 5 | 1. Create a new directory 6 | 2. Implement new ops there 7 | 3. Delcare its Python interface in `vision.cpp`. 8 | -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/anchor_generator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/anchor_generator.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/anchor_generator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/anchor_generator.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/backbone.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/backbone.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/modeling/backbone/__pycache__/backbone.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/backbone/__pycache__/backbone.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/losses.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/losses.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/losses.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/losses.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/c2_model_loading.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/c2_model_loading.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/c2_model_loading.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/c2_model_loading.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscape_car.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscape_car.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscape_car.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscape_car.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscapes_org.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscapes_org.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscapes_org.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscapes_org.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/coco_panoptic.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/coco_panoptic.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/coco_panoptic.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/coco_panoptic.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/data/transforms/__pycache__/augmentation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/augmentation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/augmentation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/augmentation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/clipart_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/clipart_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/clipart_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/clipart_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/foggy_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/foggy_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/foggy_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/foggy_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/sem_seg_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/sem_seg_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/sem_seg_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/sem_seg_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/sim10k_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/sim10k_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/sim10k_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/sim10k_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- 
/detectron2/modeling/meta_arch/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/retinanet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/retinanet.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/retinanet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/retinanet.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/ttda_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/ttda_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/ttda_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/ttda_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/box_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/box_head.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/box_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/box_head.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/fast_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/fast_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/fast_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/fast_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/modeling/roi_heads/__pycache__/mask_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/mask_head.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/mask_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/mask_head.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/roi_heads.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/roi_heads.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/roi_heads.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/roi_heads.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/detection_checkpoint.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/detection_checkpoint.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/checkpoint/__pycache__/detection_checkpoint.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/checkpoint/__pycache__/detection_checkpoint.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscape_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscape_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscape_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscape_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/panoptic_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/panoptic_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/panoptic_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/panoptic_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/test_time_augmentation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/test_time_augmentation.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/modeling/__pycache__/test_time_augmentation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/__pycache__/test_time_augmentation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/memory_bank.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/memory_bank.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/memory_bank.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/memory_bank.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/panoptic_fpn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/panoptic_fpn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/panoptic_fpn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/panoptic_fpn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/semantic_seg.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/semantic_seg.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/semantic_seg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/semantic_seg.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/st_ttda_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/st_ttda_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/ts_ensemble.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/ts_ensemble.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/rpn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/rpn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/rpn.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/rpn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/cascade_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/cascade_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/cascade_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/cascade_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscapes_panoptic.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscapes_panoptic.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/cityscapes_panoptic.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/cityscapes_panoptic.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/lvis_v0_5_categories.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis_v0_5_categories.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/lvis_v0_5_categories.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis_v0_5_categories.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/lvis_v1_categories.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis_v1_categories.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/__pycache__/lvis_v1_categories.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/datasets/__pycache__/lvis_v1_categories.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/distributed_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/distributed_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/distributed_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/distributed_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/augmentation_impl.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/augmentation_impl.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/transforms/__pycache__/augmentation_impl.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/transforms/__pycache__/augmentation_impl.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscapes_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscapes_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscapes_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscapes_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/pascal_voc_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/pascal_voc_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/pascal_voc_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/pascal_voc_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/rotated_coco_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/rotated_coco_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/rotated_coco_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/rotated_coco_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/watercolor_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/watercolor_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/watercolor_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/watercolor_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/build.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/build.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/build.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/rrpn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/rrpn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/rrpn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/rrpn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/keypoint_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/keypoint_head.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/keypoint_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/keypoint_head.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/grouped_batch_sampler.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/grouped_batch_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/data/samplers/__pycache__/grouped_batch_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/data/samplers/__pycache__/grouped_batch_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscape_car_evaluation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscape_car_evaluation.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/evaluation/__pycache__/cityscape_car_evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/evaluation/__pycache__/cityscape_car_evaluation.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/student_ttda_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/student_ttda_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/student_ttda_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/student_ttda_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/teacher_ttda_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/teacher_ttda_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/teacher_ttda_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/teacher_ttda_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/rotated_fast_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/rotated_fast_rcnn.cpython-36.pyc 
-------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__pycache__/rotated_fast_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/roi_heads/__pycache__/rotated_fast_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/student_ttda_mem_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/student_ttda_mem_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/student_ttda_mem_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/student_ttda_mem_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/teacher_ttda_mem_rcnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/teacher_ttda_mem_rcnn.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__pycache__/teacher_ttda_mem_rcnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/meta_arch/__pycache__/teacher_ttda_mem_rcnn.cpython-37.pyc -------------------------------------------------------------------------------- 
/detectron2/modeling/proposal_generator/__pycache__/proposal_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/proposal_utils.cpython-36.pyc -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__pycache__/proposal_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vibashan/online-da/HEAD/detectron2/modeling/proposal_generator/__pycache__/proposal_utils.cpython-37.pyc -------------------------------------------------------------------------------- /detectron2/data/datasets/register_coco.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .coco import register_coco_instances # noqa 3 | from .coco_panoptic import register_coco_panoptic_separated # noqa 4 | -------------------------------------------------------------------------------- /detectron2/utils/README.md: -------------------------------------------------------------------------------- 1 | # Utility functions 2 | 3 | This folder contain utility functions that are not used in the 4 | core library, but are useful for building models or training 5 | code using the config system. 
6 | -------------------------------------------------------------------------------- /detectron2/export/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .api import * 4 | from .flatten import TracingAdapter 5 | from .torchscript import scripting_with_instances, dump_torchscript_IR 6 | 7 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 8 | -------------------------------------------------------------------------------- /detectron2/modeling/proposal_generator/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator 3 | from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead 4 | 5 | __all__ = list(globals().keys()) 6 | -------------------------------------------------------------------------------- /dev/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Some scripts for developers to use, include: 3 | 4 | - `linter.sh`: lint the codebase before commit. 5 | - `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. 6 | Note that these tests require 2 GPUs. 7 | - `parse_results.sh`: parse results from a log file. 
8 | -------------------------------------------------------------------------------- /reference.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{vs2023towards, 2 | title={Towards Online Domain Adaptive Object Detection}, 3 | author={VS, Vibashan and Oza, Poojan and Patel, Vishal M}, 4 | booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision}, 5 | pages={478--488}, 6 | year={2023} 7 | } 8 | -------------------------------------------------------------------------------- /detectron2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .utils.env import setup_environment 4 | 5 | setup_environment() 6 | 7 | 8 | # This line will be programatically read/write by setup.py. 9 | # Leave them at the bottom of this file and don't touch them. 10 | __version__ = "0.5" 11 | -------------------------------------------------------------------------------- /detectron2/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params 3 | from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, LRMultiplier, WarmupParamScheduler 4 | 5 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 6 | -------------------------------------------------------------------------------- /detectron2/data/datasets/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Common Datasets 4 | 5 | The dataset implemented here do not need to load the data into the final format. 6 | It should provide the minimal data structure needed to use the dataset, so it can be very efficient. 
7 | 8 | For example, for an image dataset, just provide the file names and labels, but don't read the images. 9 | Let the downstream decide how to read. 10 | -------------------------------------------------------------------------------- /detectron2/checkpoint/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # File: 4 | 5 | 6 | from . import catalog as _UNUSED # register the handler 7 | from .detection_checkpoint import DetectionCheckpointer 8 | from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer 9 | 10 | __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] 11 | -------------------------------------------------------------------------------- /detectron2/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .launch import * 4 | from .train_loop import * 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | 8 | 9 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 10 | # but still make them available here 11 | from .hooks import * 12 | from .defaults import * 13 | -------------------------------------------------------------------------------- /configs/Base-RCNN-C4.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RPN: 4 | PRE_NMS_TOPK_TEST: 6000 5 | POST_NMS_TOPK_TEST: 1000 6 | ROI_HEADS: 7 | NAME: "Res5ROIHeads" 8 | DATASETS: 9 | TRAIN: ("coco_2017_train",) 10 | TEST: ("coco_2017_val",) 11 | SOLVER: 12 | IMS_PER_BATCH: 16 13 | BASE_LR: 0.02 14 | STEPS: (60000, 80000) 15 | MAX_ITER: 90000 16 | INPUT: 17 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 18 | VERSION: 2 19 | 
-------------------------------------------------------------------------------- /detectron2/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .distributed_sampler import ( 3 | InferenceSampler, 4 | RandomSubsetTrainingSampler, 5 | RepeatFactorTrainingSampler, 6 | TrainingSampler, 7 | ) 8 | 9 | from .grouped_batch_sampler import GroupedBatchSampler 10 | 11 | __all__ = [ 12 | "GroupedBatchSampler", 13 | "TrainingSampler", 14 | "RandomSubsetTrainingSampler", 15 | "InferenceSampler", 16 | "RepeatFactorTrainingSampler", 17 | ] 18 | -------------------------------------------------------------------------------- /detectron2/model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | """ 3 | Model Zoo API for Detectron2: a collection of functions to create common model architectures 4 | listed in `MODEL_ZOO.md `_, 5 | and optionally load their pre-trained weights. 6 | """ 7 | 8 | from .model_zoo import get, get_config_file, get_checkpoint_url, get_config 9 | 10 | __all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"] 11 | -------------------------------------------------------------------------------- /detectron2/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from fvcore.transforms.transform import Transform, TransformList # order them first 3 | from fvcore.transforms.transform import * 4 | from .transform import * 5 | from .augmentation import * 6 | from .augmentation_impl import * 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | 10 | 11 | from detectron2.utils.env import fixup_module_metadata 12 | 13 | fixup_module_metadata(__name__, globals(), __all__) 14 | del fixup_module_metadata 15 | -------------------------------------------------------------------------------- /detectron2/modeling/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip 3 | 4 | from .backbone import Backbone 5 | from .fpn import FPN 6 | from .regnet import RegNet 7 | from .resnet import ( 8 | BasicStem, 9 | ResNet, 10 | ResNetBlockBase, 11 | build_resnet_backbone, 12 | make_stage, 13 | BottleneckBlock, 14 | ) 15 | 16 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 17 | # TODO can expose more resnet blocks after careful consideration 18 | -------------------------------------------------------------------------------- /detectron2/export/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains code to prepare a detectron2 model for deployment. 3 | Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. 4 | 5 | Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. 6 | 7 | 8 | ### Acknowledgements 9 | 10 | Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools. 11 | 12 | Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who 13 | help export Detectron2 models to TorchScript. 
14 | -------------------------------------------------------------------------------- /dev/packaging/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## To build a cu101 wheel for release: 3 | 4 | ``` 5 | $ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101 6 | # inside the container: 7 | # git clone https://github.com/facebookresearch/detectron2/ 8 | # cd detectron2 9 | # export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8 10 | # ./dev/packaging/build_wheel.sh 11 | ``` 12 | 13 | ## To build all wheels for combinations of CUDA and Python 14 | ``` 15 | ./dev/packaging/build_all_wheels.sh 16 | ./dev/packaging/gen_wheel_index.sh /path/to/wheels 17 | ``` 18 | -------------------------------------------------------------------------------- /dataset/prepare_for_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # Download some files needed for running tests. 5 | 6 | cd "${0%/*}" 7 | 8 | BASE=https://dl.fbaipublicfiles.com/detectron2 9 | mkdir -p coco/annotations 10 | 11 | for anno in instances_val2017_100 \ 12 | person_keypoints_val2017_100 \ 13 | instances_minival2014_100 \ 14 | person_keypoints_minival2014_100; do 15 | 16 | dest=coco/annotations/$anno.json 17 | [[ -s $dest ]] && { 18 | echo "$dest exists. Skipping ..." 
19 | } || { 20 | wget $BASE/annotations/coco/$anno.json -O $dest 21 | } 22 | done 23 | -------------------------------------------------------------------------------- /configs/online_da/foggy_baseline.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | WEIGHT: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RPN: 6 | PRE_NMS_TOPK_TEST: 6000 7 | POST_NMS_TOPK_TEST: 300 8 | ANCHOR_SIZES: (128, 256, 512) 9 | ROI_HEADS: 10 | NUM_CLASSES: 8 11 | INPUT: 12 | MIN_SIZE_TRAIN: (600,) 13 | MIN_SIZE_TEST: 600 14 | DATASETS: 15 | TRAIN: ("cityscape_2007_train_t",) 16 | TEST: ("cityscape_2007_test_t",) 17 | SOLVER: 18 | BASE_LR: 0.001 19 | WEIGHT_DECAY: 0.0001 20 | STEPS: (50000, ) 21 | MAX_ITER: 70000 22 | IMS_PER_BATCH: 1 23 | OUTPUT_DIR: "foggy_baseline" 24 | -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | from .build import META_ARCH_REGISTRY, build_model # isort:skip 5 | 6 | from .panoptic_fpn import PanopticFPN 7 | 8 | # import all the meta_arch, so they will be registered 9 | from .rcnn import GeneralizedRCNN, ProposalNetwork 10 | from .student_ttda_mem_rcnn import student_ttda_mem_RCNN 11 | from .teacher_ttda_mem_rcnn import teacher_ttda_mem_RCNN 12 | from .retinanet import RetinaNet 13 | from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head 14 | 15 | 16 | __all__ = list(globals().keys()) 17 | -------------------------------------------------------------------------------- /detectron2/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .compat import downgrade_config, upgrade_config 3 | from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable 4 | from .instantiate import instantiate 5 | from .lazy import LazyCall, LazyConfig 6 | 7 | __all__ = [ 8 | "CfgNode", 9 | "get_cfg", 10 | "global_cfg", 11 | "set_global_cfg", 12 | "downgrade_config", 13 | "upgrade_config", 14 | "configurable", 15 | "instantiate", 16 | "LazyCall", 17 | "LazyConfig", 18 | ] 19 | 20 | 21 | from detectron2.utils.env import fixup_module_metadata 22 | 23 | fixup_module_metadata(__name__, globals(), __all__) 24 | del fixup_module_metadata 25 | -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/ts_ensemble.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from torch.nn.parallel import DataParallel, DistributedDataParallel 3 | import torch.nn as nn 4 | 5 | 6 | class EnsembleTSModel(nn.Module): 7 | def __init__(self, modelTeacher, modelStudent): 8 | super(EnsembleTSModel, self).__init__() 9 | 10 | if isinstance(modelTeacher, (DistributedDataParallel, DataParallel)): 11 | modelTeacher = modelTeacher.module 12 | if isinstance(modelStudent, (DistributedDataParallel, DataParallel)): 13 | modelStudent = modelStudent.module 14 | 15 | self.modelTeacher = modelTeacher 16 | self.modelStudent = modelStudent -------------------------------------------------------------------------------- /detectron2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from . 
import transforms # isort:skip 3 | 4 | from .build import ( 5 | build_batch_data_loader, 6 | build_detection_test_loader, 7 | build_detection_train_loader, 8 | get_detection_dataset_dicts, 9 | load_proposals_into_dataset, 10 | print_instances_class_histogram, 11 | ) 12 | from .catalog import DatasetCatalog, MetadataCatalog, Metadata 13 | from .common import DatasetFromList, MapDataset, ToIterableDataset 14 | from .dataset_mapper import DatasetMapper 15 | 16 | # ensure the builtin datasets are registered 17 | from . import datasets, samplers # isort:skip 18 | 19 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 20 | -------------------------------------------------------------------------------- /detectron2/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance 3 | from .image_list import ImageList 4 | 5 | from .instances import Instances 6 | from .keypoints import Keypoints, heatmaps_to_keypoints 7 | from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks 8 | from .rotated_boxes import RotatedBoxes 9 | from .rotated_boxes import pairwise_iou as pairwise_iou_rotated 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | 13 | 14 | from detectron2.utils.env import fixup_module_metadata 15 | 16 | fixup_module_metadata(__name__, globals(), __all__) 17 | del fixup_module_metadata 18 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/cuda_version.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
2 | 3 | #include 4 | 5 | namespace detectron2 { 6 | int get_cudart_version() { 7 | // Not a ROCM platform: Either HIP is not used, or 8 | // it is used, but platform is not ROCM (i.e. it is CUDA) 9 | #if !defined(__HIP_PLATFORM_HCC__) 10 | return CUDART_VERSION; 11 | #else 12 | int version = 0; 13 | 14 | #if HIP_VERSION_MAJOR != 0 15 | // Create a convention similar to that of CUDA, as assumed by other 16 | // parts of the code. 17 | 18 | version = HIP_VERSION_MINOR; 19 | version += (HIP_VERSION_MAJOR * 100); 20 | #else 21 | hipRuntimeGetVersion(&version); 22 | #endif 23 | return version; 24 | #endif 25 | } 26 | } // namespace detectron2 27 | -------------------------------------------------------------------------------- /detectron2/layers/rotated_boxes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from __future__ import absolute_import, division, print_function, unicode_literals 3 | 4 | from detectron2 import _C 5 | 6 | 7 | def pairwise_iou_rotated(boxes1, boxes2): 8 | """ 9 | Return intersection-over-union (Jaccard index) of boxes. 10 | 11 | Both sets of boxes are expected to be in 12 | (x_center, y_center, width, height, angle) format. 13 | 14 | Arguments: 15 | boxes1 (Tensor[N, 5]) 16 | boxes2 (Tensor[M, 5]) 17 | 18 | Returns: 19 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 20 | IoU values for every element in boxes1 and boxes2 21 | """ 22 | return _C.box_iou_rotated(boxes1, boxes2) 23 | -------------------------------------------------------------------------------- /detectron2/layers/shape_spec.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 
3 | from collections import namedtuple 4 | 5 | 6 | class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): 7 | """ 8 | A simple structure that contains basic shape specification about a tensor. 9 | It is often used as the auxiliary inputs/outputs of models, 10 | to complement the lack of shape inference ability among pytorch modules. 11 | 12 | Attributes: 13 | channels: 14 | height: 15 | width: 16 | stride: 17 | """ 18 | 19 | def __new__(cls, channels=None, height=None, width=None, stride=None): 20 | return super().__new__(cls, channels, height, width, stride) 21 | -------------------------------------------------------------------------------- /configs/Base-RCNN-DilatedC5.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | RESNETS: 4 | OUT_FEATURES: ["res5"] 5 | RES5_DILATION: 2 6 | RPN: 7 | IN_FEATURES: ["res5"] 8 | PRE_NMS_TOPK_TEST: 6000 9 | POST_NMS_TOPK_TEST: 1000 10 | ROI_HEADS: 11 | NAME: "StandardROIHeads" 12 | IN_FEATURES: ["res5"] 13 | ROI_BOX_HEAD: 14 | NAME: "FastRCNNConvFCHead" 15 | NUM_FC: 2 16 | POOLER_RESOLUTION: 7 17 | ROI_MASK_HEAD: 18 | NAME: "MaskRCNNConvUpsampleHead" 19 | NUM_CONV: 4 20 | POOLER_RESOLUTION: 14 21 | DATASETS: 22 | TRAIN: ("coco_2017_train",) 23 | TEST: ("coco_2017_val",) 24 | SOLVER: 25 | IMS_PER_BATCH: 16 26 | BASE_LR: 0.02 27 | STEPS: (60000, 80000) 28 | MAX_ITER: 90000 29 | INPUT: 30 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 31 | VERSION: 2 32 | -------------------------------------------------------------------------------- /configs/online_da/onda_foggy_mem.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "student_ttda_mem_RCNN" 3 | WEIGHT: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RPN: 6 | PRE_NMS_TOPK_TEST: 6000 7 | POST_NMS_TOPK_TEST: 300 8 | ANCHOR_SIZES: (128, 256, 512) 9 | ROI_HEADS: 10 | 
NUM_CLASSES: 8 11 | RESNETS: 12 | NORM: "FrozenBN" # Options: FrozenBN, GN, "SyncBN", "BN" 13 | OUT_FEATURES: ["res4"] 14 | INPUT: 15 | MIN_SIZE_TRAIN: (600,) 16 | MIN_SIZE_TEST: 600 17 | DATASETS: 18 | TRAIN: ("cityscape_2007_train_t",) 19 | TEST: ("cityscape_2007_test_t",) 20 | SOLVER: 21 | BASE_LR: 0.001 22 | WEIGHT_DECAY: 0.0001 23 | STEPS: (50000, ) 24 | MAX_ITER: 70000 25 | WARMUP_ITERS: 0 26 | IMS_PER_BATCH: 1 27 | TEST_TIME: 28 | TYPE: True 29 | OUTPUT_DIR: "checkpoint/online_da_foggy" -------------------------------------------------------------------------------- /configs/online_da/onda_sim_mem.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "student_ttda_mem_RCNN" 3 | WEIGHT: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RPN: 6 | PRE_NMS_TOPK_TEST: 6000 7 | POST_NMS_TOPK_TEST: 300 8 | ANCHOR_SIZES: (128, 256, 512) 9 | ROI_HEADS: 10 | NUM_CLASSES: 1 11 | RESNETS: 12 | NORM: "FrozenBN" # Options: FrozenBN, GN, "SyncBN", "BN" 13 | OUT_FEATURES: ["res4"] 14 | INPUT: 15 | MIN_SIZE_TRAIN: (600,) 16 | MIN_SIZE_TEST: 600 17 | DATASETS: 18 | TRAIN: ("cityscape_car_2007_train_s",) 19 | TEST: ("cityscape_car_2007_test_s",) 20 | SOLVER: 21 | BASE_LR: 0.001 22 | WEIGHT_DECAY: 0.0001 23 | STEPS: (50000, ) 24 | MAX_ITER: 70000 25 | IMS_PER_BATCH: 1 26 | WARMUP_ITERS: 0 27 | TEST_TIME: 28 | TYPE: True 29 | OUTPUT_DIR: "checkpoint/online_da_sim" -------------------------------------------------------------------------------- /configs/online_da/onda_kitti_mem.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "student_ttda_mem_RCNN" 3 | WEIGHT: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" 4 | MASK_ON: False 5 | RPN: 6 | PRE_NMS_TOPK_TEST: 6000 7 | POST_NMS_TOPK_TEST: 300 8 | ANCHOR_SIZES: (128, 256, 512) 9 | ROI_HEADS: 10 | NUM_CLASSES: 2 11 | RESNETS: 12 | NORM: "FrozenBN" # Options: FrozenBN, GN, 
"SyncBN", "BN" 13 | OUT_FEATURES: ["res4"] 14 | INPUT: 15 | MIN_SIZE_TRAIN: (600,) 16 | MIN_SIZE_TEST: 600 17 | DATASETS: 18 | TRAIN: ("cityscape_car_2007_train_s",) 19 | TEST: ("cityscape_car_2007_test_s",) 20 | SOLVER: 21 | BASE_LR: 0.001 22 | WEIGHT_DECAY: 0.0001 23 | STEPS: (50000, ) 24 | MAX_ITER: 70000 25 | IMS_PER_BATCH: 1 26 | WARMUP_ITERS: 0 27 | TEST_TIME: 28 | TYPE: True 29 | OUTPUT_DIR: "checkpoint/online_da_Kitti" -------------------------------------------------------------------------------- /configs/Base-RetinaNet.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "RetinaNet" 3 | BACKBONE: 4 | NAME: "build_retinanet_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res3", "res4", "res5"] 7 | ANCHOR_GENERATOR: 8 | SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] 9 | FPN: 10 | IN_FEATURES: ["res3", "res4", "res5"] 11 | RETINANET: 12 | IOU_THRESHOLDS: [0.4, 0.5] 13 | IOU_LABELS: [0, -1, 1] 14 | SMOOTH_L1_LOSS_BETA: 0.0 15 | DATASETS: 16 | TRAIN: ("coco_2017_train",) 17 | TEST: ("coco_2017_val",) 18 | SOLVER: 19 | IMS_PER_BATCH: 16 20 | BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate 21 | STEPS: (60000, 80000) 22 | MAX_ITER: 90000 23 | INPUT: 24 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 25 | VERSION: 2 26 | -------------------------------------------------------------------------------- /detectron2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm 3 | from .deform_conv import DeformConv, ModulatedDeformConv 4 | from .mask_ops import paste_masks_in_image 5 | from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated 6 | from .roi_align import ROIAlign, roi_align 7 | from .roi_align_rotated import ROIAlignRotated, roi_align_rotated 8 | from .shape_spec import ShapeSpec 9 | from .wrappers import ( 10 | BatchNorm2d, 11 | Conv2d, 12 | ConvTranspose2d, 13 | cat, 14 | interpolate, 15 | Linear, 16 | nonzero_tuple, 17 | cross_entropy, 18 | ) 19 | from .blocks import CNNBlockBase, DepthwiseSeparableConv2d 20 | from .aspp import ASPP 21 | 22 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 23 | -------------------------------------------------------------------------------- /detectron2/modeling/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead 3 | from .keypoint_head import ( 4 | ROI_KEYPOINT_HEAD_REGISTRY, 5 | build_keypoint_head, 6 | BaseKeypointRCNNHead, 7 | KRCNNConvDeconvUpsampleHead, 8 | ) 9 | from .mask_head import ( 10 | ROI_MASK_HEAD_REGISTRY, 11 | build_mask_head, 12 | BaseMaskRCNNHead, 13 | MaskRCNNConvUpsampleHead, 14 | ) 15 | from .roi_heads import ( 16 | ROI_HEADS_REGISTRY, 17 | ROIHeads, 18 | Res5ROIHeads, 19 | StandardROIHeads, 20 | build_roi_heads, 21 | select_foreground_proposals, 22 | ) 23 | from .cascade_rcnn import CascadeROIHeads 24 | from .rotated_fast_rcnn import RROIHeads 25 | from .fast_rcnn import FastRCNNOutputLayers 26 | 27 | from . 
def build_model(cfg):
    """
    Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
    Note that it does not load any weights from ``cfg``.
    """
    arch_name = cfg.MODEL.META_ARCHITECTURE
    # Look up the registered meta-architecture class and instantiate it.
    arch_cls = META_ARCH_REGISTRY.get(arch_name)
    model = arch_cls(cfg)
    # Move to the configured device before returning; weights are NOT loaded here.
    model.to(torch.device(cfg.MODEL.DEVICE))
    _log_api_usage("modeling.meta_arch." + arch_name)
    return model
def build_proposal_generator(cfg, input_shape):
    """
    Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
    The name can be "PrecomputedProposals" to use no proposal generator.
    """
    gen_name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
    if gen_name != "PrecomputedProposals":
        return PROPOSAL_GENERATOR_REGISTRY.get(gen_name)(cfg, input_shape)
    # Precomputed proposals are loaded from the dataset; no generator module needed.
    return None
def convert(input, output):
    """Shift ADE20K semantic labels down by one and save the result.

    Reads the annotation image at ``input``, subtracts 1 from every uint8
    label (so label 0 == "ignore" wraps around to 255, and all real classes
    become 0-based), and writes the remapped image to ``output``.
    """
    label = np.asarray(Image.open(input))
    assert label.dtype == np.uint8
    # uint8 arithmetic wraps: 0 (ignore) becomes 255, others are shifted by 1
    shifted = label - 1
    Image.fromarray(shifted).save(output)
Copyright (c) Facebook, Inc. and its affiliates. 2 | from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json 3 | from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated 4 | from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta 5 | from .pascal_voc import load_voc_instances, register_pascal_voc 6 | from . import builtin as _builtin # ensure the builtin datasets are registered 7 | 8 | from .clipart import load_clipart_instances, register_clipart 9 | from .watercolor import load_watercolor_instances, register_watercolor 10 | from .cityscape import load_cityscape_instances, register_cityscape 11 | from .foggy import load_foggy_instances, register_foggy 12 | from .sim10k import load_sim10k_instances, register_sim10k 13 | from .kitti import load_kitti_instances, register_kitti 14 | from .cityscape_car import load_cityscape_car_instances, register_cityscape_car 15 | 16 | 17 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 18 | -------------------------------------------------------------------------------- /dev/linter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # cd to detectron2 project root 5 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 6 | 7 | { 8 | black --version | grep -E "21\." > /dev/null 9 | } || { 10 | echo "Linter requires 'black==21.*' !" 11 | exit 1 12 | } 13 | 14 | ISORT_VERSION=$(isort --version-number) 15 | if [[ "$ISORT_VERSION" != 4.3* ]]; then 16 | echo "Linter requires isort==4.3.21 !" 17 | exit 1 18 | fi 19 | 20 | set -v 21 | 22 | echo "Running isort ..." 23 | isort -y -sp . --atomic 24 | 25 | echo "Running black ..." 26 | black -l 100 . 27 | 28 | echo "Running flake8 ..." 29 | if [ -x "$(command -v flake8-3)" ]; then 30 | flake8-3 . 31 | else 32 | python3 -m flake8 . 33 | fi 34 | 35 | # echo "Running mypy ..." 
def build_backbone(cfg, input_shape=None):
    """
    Build a backbone from `cfg.MODEL.BACKBONE.NAME`.

    Args:
        cfg: a detectron2 config node.
        input_shape: optional ShapeSpec for the backbone input; when omitted,
            the channel count is inferred from ``cfg.MODEL.PIXEL_MEAN``.

    Returns:
        an instance of :class:`Backbone`
    """
    shape = input_shape
    if shape is None:
        # Number of input channels is implied by the per-channel pixel mean.
        shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))

    factory = BACKBONE_REGISTRY.get(cfg.MODEL.BACKBONE.NAME)
    backbone = factory(cfg, shape)
    assert isinstance(backbone, Backbone)
    return backbone
2 | #pragma once 3 | #include 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor box_iou_rotated_cpu( 8 | const at::Tensor& boxes1, 9 | const at::Tensor& boxes2); 10 | 11 | #if defined(WITH_CUDA) || defined(WITH_HIP) 12 | at::Tensor box_iou_rotated_cuda( 13 | const at::Tensor& boxes1, 14 | const at::Tensor& boxes2); 15 | #endif 16 | 17 | // Interface for Python 18 | // inline is needed to prevent multiple function definitions when this header is 19 | // included by different cpps 20 | inline at::Tensor box_iou_rotated( 21 | const at::Tensor& boxes1, 22 | const at::Tensor& boxes2) { 23 | assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); 24 | if (boxes1.device().is_cuda()) { 25 | #if defined(WITH_CUDA) || defined(WITH_HIP) 26 | return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); 27 | #else 28 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 29 | #endif 30 | } 31 | 32 | return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); 33 | } 34 | 35 | } // namespace detectron2 36 | -------------------------------------------------------------------------------- /detectron2/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator 3 | from .coco_evaluation import COCOEvaluator 4 | from .rotated_coco_evaluation import RotatedCOCOEvaluator 5 | from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset, inference_on_corruption_dataset, oshot_inference_on_dataset 6 | from .lvis_evaluation import LVISEvaluator 7 | from .panoptic_evaluation import COCOPanopticEvaluator 8 | from .pascal_voc_evaluation import PascalVOCDetectionEvaluator 9 | from .sem_seg_evaluation import SemSegEvaluator 10 | from .testing import print_csv_format, verify_results 11 | 12 | from .clipart_evaluation import ClipartDetectionEvaluator 13 | from .watercolor_evaluation import WatercolorDetectionEvaluator 14 | from .cityscape_evaluation import CityscapeDetectionEvaluator 15 | from .foggy_evaluation import FoggyDetectionEvaluator 16 | from .cityscape_car_evaluation import CityscapeCarDetectionEvaluator 17 | from .sim10k_evaluation import Sim10kDetectionEvaluator 18 | 19 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 20 | -------------------------------------------------------------------------------- /dev/packaging/build_wheel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | set -ex 4 | 5 | ldconfig # https://github.com/NVIDIA/nvidia-docker/issues/854 6 | 7 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 8 | . "$script_dir/pkg_helpers.bash" 9 | 10 | echo "Build Settings:" 11 | echo "CU_VERSION: $CU_VERSION" # e.g. cu101 12 | echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" 13 | echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6 14 | echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 
class PicklableWrapper(object):
    """
    Wrap an object to make it more picklable, note that it uses
    heavy weight serialization libraries that are slower than pickle.
    It's best to use it only on closures (which are usually not picklable).

    This is a simplified version of
    https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py
    """

    def __init__(self, obj):
        while isinstance(obj, PicklableWrapper):
            # Wrapping an object twice is no-op: unwrap to the innermost object.
            obj = obj._obj
        self._obj = obj

    def __reduce__(self):
        # Serialize with cloudpickle (handles closures/lambdas that plain
        # pickle cannot); unpickling calls cloudpickle.loads(s) to restore.
        s = cloudpickle.dumps(self._obj)
        return cloudpickle.loads, (s,)

    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)

    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the previous object.
        # __getattr__ is only invoked when normal lookup fails. The original
        # fallthrough ``getattr(self, attr)`` recursed infinitely when "_obj"
        # itself was missing (attribute probing on an instance before
        # __init__ ran, e.g. during unpickling). Raise AttributeError instead,
        # as the attribute protocol expects.
        if attr != "_obj":
            return getattr(self._obj, attr)
        raise AttributeError(attr)
2 | #pragma once 3 | #include 4 | 5 | namespace detectron2 { 6 | 7 | at::Tensor nms_rotated_cpu( 8 | const at::Tensor& dets, 9 | const at::Tensor& scores, 10 | const double iou_threshold); 11 | 12 | #if defined(WITH_CUDA) || defined(WITH_HIP) 13 | at::Tensor nms_rotated_cuda( 14 | const at::Tensor& dets, 15 | const at::Tensor& scores, 16 | const double iou_threshold); 17 | #endif 18 | 19 | // Interface for Python 20 | // inline is needed to prevent multiple function definitions when this header is 21 | // included by different cpps 22 | inline at::Tensor nms_rotated( 23 | const at::Tensor& dets, 24 | const at::Tensor& scores, 25 | const double iou_threshold) { 26 | assert(dets.device().is_cuda() == scores.device().is_cuda()); 27 | if (dets.device().is_cuda()) { 28 | #if defined(WITH_CUDA) || defined(WITH_HIP) 29 | return nms_rotated_cuda( 30 | dets.contiguous(), scores.contiguous(), iou_threshold); 31 | #else 32 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 33 | #endif 34 | } 35 | 36 | return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 
2 | #include "box_iou_rotated.h" 3 | #include "box_iou_rotated_utils.h" 4 | 5 | namespace detectron2 { 6 | 7 | template 8 | void box_iou_rotated_cpu_kernel( 9 | const at::Tensor& boxes1, 10 | const at::Tensor& boxes2, 11 | at::Tensor& ious) { 12 | auto num_boxes1 = boxes1.size(0); 13 | auto num_boxes2 = boxes2.size(0); 14 | 15 | for (int i = 0; i < num_boxes1; i++) { 16 | for (int j = 0; j < num_boxes2; j++) { 17 | ious[i * num_boxes2 + j] = single_box_iou_rotated( 18 | boxes1[i].data_ptr(), boxes2[j].data_ptr()); 19 | } 20 | } 21 | } 22 | 23 | at::Tensor box_iou_rotated_cpu( 24 | // input must be contiguous: 25 | const at::Tensor& boxes1, 26 | const at::Tensor& boxes2) { 27 | auto num_boxes1 = boxes1.size(0); 28 | auto num_boxes2 = boxes2.size(0); 29 | at::Tensor ious = 30 | at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); 31 | 32 | box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); 33 | 34 | // reshape from 1d array to 2d array 35 | auto shape = std::vector{num_boxes1, num_boxes2}; 36 | return ious.reshape(shape); 37 | } 38 | 39 | } // namespace detectron2 40 | -------------------------------------------------------------------------------- /detectron2/utils/file_io.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler 3 | from iopath.common.file_io import PathManager as PathManagerBase 4 | 5 | __all__ = ["PathManager", "PathHandler"] 6 | 7 | 8 | PathManager = PathManagerBase() 9 | """ 10 | This is a detectron2 project-specific PathManager. 11 | We try to stay away from global PathManager in fvcore as it 12 | introduces potential conflicts among other libraries. 13 | """ 14 | 15 | 16 | class Detectron2Handler(PathHandler): 17 | """ 18 | Resolve anything that's hosted under detectron2's namespace. 
19 | """ 20 | 21 | PREFIX = "detectron2://" 22 | S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" 23 | 24 | def _get_supported_prefixes(self): 25 | return [self.PREFIX] 26 | 27 | def _get_local_path(self, path, **kwargs): 28 | name = path[len(self.PREFIX) :] 29 | return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs) 30 | 31 | def _open(self, path, mode="r", **kwargs): 32 | return PathManager.open(self._get_local_path(path), mode, **kwargs) 33 | 34 | 35 | PathManager.register_handler(HTTPURLHandler()) 36 | PathManager.register_handler(OneDrivePathHandler()) 37 | PathManager.register_handler(Detectron2Handler()) 38 | -------------------------------------------------------------------------------- /dev/run_inference_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | BIN="python tools/train_net.py" 5 | OUTPUT="inference_test_output" 6 | NUM_GPUS=2 7 | 8 | CFG_LIST=( "${@:1}" ) 9 | 10 | if [ ${#CFG_LIST[@]} -eq 0 ]; then 11 | CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) 12 | fi 13 | 14 | echo "========================================================================" 15 | echo "Configs to run:" 16 | echo "${CFG_LIST[@]}" 17 | echo "========================================================================" 18 | 19 | 20 | for cfg in "${CFG_LIST[@]}"; do 21 | echo "========================================================================" 22 | echo "Running $cfg ..." 23 | echo "========================================================================" 24 | $BIN \ 25 | --eval-only \ 26 | --num-gpus $NUM_GPUS \ 27 | --config-file "$cfg" \ 28 | OUTPUT_DIR $OUTPUT 29 | rm -rf $OUTPUT 30 | done 31 | 32 | 33 | echo "========================================================================" 34 | echo "Running demo.py ..." 
35 | echo "========================================================================" 36 | DEMO_BIN="python demo/demo.py" 37 | COCO_DIR=datasets/coco/val2014 38 | mkdir -pv $OUTPUT 39 | 40 | set -v 41 | 42 | $DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ 43 | --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT 44 | rm -rf $OUTPUT 45 | -------------------------------------------------------------------------------- /configs/Base-RCNN-FPN.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "GeneralizedRCNN" 3 | BACKBONE: 4 | NAME: "build_resnet_fpn_backbone" 5 | RESNETS: 6 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 7 | FPN: 8 | IN_FEATURES: ["res2", "res3", "res4", "res5"] 9 | ANCHOR_GENERATOR: 10 | SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map 11 | ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) 12 | RPN: 13 | IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] 14 | PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level 15 | PRE_NMS_TOPK_TEST: 1000 # Per FPN level 16 | # Detectron1 uses 2000 proposals per-batch, 17 | # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) 18 | # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
19 | POST_NMS_TOPK_TRAIN: 1000 20 | POST_NMS_TOPK_TEST: 1000 21 | ROI_HEADS: 22 | NAME: "StandardROIHeads" 23 | IN_FEATURES: ["p2", "p3", "p4", "p5"] 24 | ROI_BOX_HEAD: 25 | NAME: "FastRCNNConvFCHead" 26 | NUM_FC: 2 27 | POOLER_RESOLUTION: 7 28 | ROI_MASK_HEAD: 29 | NAME: "MaskRCNNConvUpsampleHead" 30 | NUM_CONV: 4 31 | POOLER_RESOLUTION: 14 32 | DATASETS: 33 | TRAIN: ("coco_2017_train",) 34 | TEST: ("coco_2017_val",) 35 | SOLVER: 36 | IMS_PER_BATCH: 16 37 | BASE_LR: 0.02 38 | STEPS: (60000, 80000) 39 | MAX_ITER: 90000 40 | INPUT: 41 | MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) 42 | VERSION: 2 43 | -------------------------------------------------------------------------------- /dev/packaging/gen_wheel_index.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | 5 | root=$(readlink -f $1) 6 | if [[ -z "$root" ]]; then 7 | echo "Usage: ./gen_wheel_index.sh /absolute/path/to/wheels" 8 | exit 9 | fi 10 | 11 | export LC_ALL=C # reproducible sort 12 | # NOTE: all sort in this script might not work when xx.10 is released 13 | 14 | index=$root/index.html 15 | 16 | cd "$root" 17 | for cu in cpu cu92 cu100 cu101 cu102 cu110 cu111 cu113; do 18 | mkdir -p "$root/$cu" 19 | cd "$root/$cu" 20 | echo "Creating $PWD/index.html ..." 21 | # First sort by torch version, then stable sort by d2 version with unique. 22 | # As a result, the latest torch version for each d2 version is kept. 23 | for whl in $(find -type f -name '*.whl' -printf '%P\n' \ 24 | | sort -k 1 -r | sort -t '/' -k 2 --stable -r --unique); do 25 | echo "$whl
" 26 | done > index.html 27 | 28 | 29 | for torch in torch*; do 30 | cd "$root/$cu/$torch" 31 | 32 | # list all whl for each cuda,torch version 33 | echo "Creating $PWD/index.html ..." 34 | for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do 35 | echo "$whl
" 36 | done > index.html 37 | done 38 | done 39 | 40 | cd "$root" 41 | # Just list everything: 42 | echo "Creating $index ..." 43 | for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do 44 | echo "$whl
" 45 | done > "$index" 46 | 47 | -------------------------------------------------------------------------------- /dev/packaging/build_all_wheels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | [[ -d "dev/packaging" ]] || { 5 | echo "Please run this script at detectron2 root!" 6 | exit 1 7 | } 8 | 9 | build_one() { 10 | cu=$1 11 | pytorch_ver=$2 12 | 13 | case "$cu" in 14 | cu*) 15 | container_name=manylinux-cuda${cu/cu/} 16 | ;; 17 | cpu) 18 | container_name=manylinux-cuda101 19 | ;; 20 | *) 21 | echo "Unrecognized cu=$cu" 22 | exit 1 23 | ;; 24 | esac 25 | 26 | echo "Launching container $container_name ..." 27 | container_id="$container_name"_"$cu"_"$pytorch_ver" 28 | 29 | py_versions=(3.6 3.7 3.8 3.9) 30 | 31 | for py in "${py_versions[@]}"; do 32 | docker run -itd \ 33 | --name "$container_id" \ 34 | --mount type=bind,source="$(pwd)",target=/detectron2 \ 35 | pytorch/$container_name 36 | 37 | cat <Tensor]: mapping from feature name (e.g., "res2") to tensor 28 | """ 29 | pass 30 | 31 | @property 32 | def size_divisibility(self) -> int: 33 | """ 34 | Some backbones require the input height and width to be divisible by a 35 | specific integer. This is typically true for encoder / decoder type networks 36 | with lateral connection (e.g., FPN) for which feature maps need to match 37 | dimension in the "bottom up" and "top down" paths. Set to 0 if no specific 38 | input size divisibility is required. 
39 | """ 40 | return 0 41 | 42 | def output_shape(self): 43 | """ 44 | Returns: 45 | dict[str->ShapeSpec] 46 | """ 47 | # this is a backward-compatible default 48 | return { 49 | name: ShapeSpec( 50 | channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] 51 | ) 52 | for name in self._out_features 53 | } 54 | -------------------------------------------------------------------------------- /detectron2/utils/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from typing import Any 4 | import pydoc 5 | from fvcore.common.registry import Registry # for backward compatibility. 6 | 7 | """ 8 | ``Registry`` and `locate` provide ways to map a string (typically found 9 | in config files) to callable objects. 10 | """ 11 | 12 | __all__ = ["Registry", "locate"] 13 | 14 | 15 | def _convert_target_to_string(t: Any) -> str: 16 | """ 17 | Inverse of ``locate()``. 18 | 19 | Args: 20 | t: any object with ``__module__`` and ``__qualname__`` 21 | """ 22 | module, qualname = t.__module__, t.__qualname__ 23 | 24 | # Compress the path to this object, e.g. ``module.submodule._impl.class`` 25 | # may become ``module.submodule.class``, if the later also resolves to the same 26 | # object. This simplifies the string, and also is less affected by moving the 27 | # class implementation. 28 | module_parts = module.split(".") 29 | for k in range(1, len(module_parts)): 30 | prefix = ".".join(module_parts[:k]) 31 | candidate = f"{prefix}.{qualname}" 32 | try: 33 | if locate(candidate) is t: 34 | return candidate 35 | except ImportError: 36 | pass 37 | return f"{module}.{qualname}" 38 | 39 | 40 | def locate(name: str) -> Any: 41 | """ 42 | Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, 43 | such as "module.submodule.class_name". 44 | 45 | Raise Exception if it cannot be found. 
46 | """ 47 | obj = pydoc.locate(name) 48 | 49 | # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly 50 | # by pydoc.locate. Try a private function from hydra. 51 | if obj is None: 52 | try: 53 | # from hydra.utils import get_method - will print many errors 54 | from hydra.utils import _locate 55 | except ImportError as e: 56 | raise ImportError(f"Cannot dynamically locate object {name}!") from e 57 | else: 58 | obj = _locate(name) # it raises if fails 59 | 60 | return obj 61 | -------------------------------------------------------------------------------- /detectron2/data/samplers/grouped_batch_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import numpy as np 3 | from torch.utils.data.sampler import BatchSampler, Sampler 4 | 5 | 6 | class GroupedBatchSampler(BatchSampler): 7 | """ 8 | Wraps another sampler to yield a mini-batch of indices. 9 | It enforces that the batch only contain elements from the same group. 10 | It also tries to provide mini-batches which follows an ordering which is 11 | as close as possible to the ordering from the original sampler. 12 | """ 13 | 14 | def __init__(self, sampler, group_ids, batch_size): 15 | """ 16 | Args: 17 | sampler (Sampler): Base sampler. 18 | group_ids (list[int]): If the sampler produces indices in range [0, N), 19 | `group_ids` must be a list of `N` ints which contains the group id of each sample. 20 | The group ids must be a set of integers in the range [0, num_groups). 21 | batch_size (int): Size of mini-batch. 
22 | """ 23 | if not isinstance(sampler, Sampler): 24 | raise ValueError( 25 | "sampler should be an instance of " 26 | "torch.utils.data.Sampler, but got sampler={}".format(sampler) 27 | ) 28 | self.sampler = sampler 29 | self.group_ids = np.asarray(group_ids) 30 | assert self.group_ids.ndim == 1 31 | self.batch_size = batch_size 32 | groups = np.unique(self.group_ids).tolist() 33 | 34 | # buffer the indices of each group until batch size is reached 35 | self.buffer_per_group = {k: [] for k in groups} 36 | 37 | def __iter__(self): 38 | for idx in self.sampler: 39 | group_id = self.group_ids[idx] 40 | group_buffer = self.buffer_per_group[group_id] 41 | group_buffer.append(idx) 42 | if len(group_buffer) == self.batch_size: 43 | yield group_buffer[:] # yield a copy of the list 44 | del group_buffer[:] 45 | 46 | def __len__(self): 47 | raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.") 48 | -------------------------------------------------------------------------------- /detectron2/modeling/meta_arch/losses.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | 4 | import torch 5 | import torch.nn as nn 6 | 7 | import torch.nn.functional as F 8 | import pdb 9 | 10 | class MemConLoss_trans(nn.Module): 11 | def __init__(self, temperature=0.07, contrast_mode='all', base_temperature=0.07): 12 | super(MemConLoss_trans, self).__init__() 13 | self.temperature = temperature 14 | self.contrast_mode = contrast_mode 15 | self.base_temperature = base_temperature 16 | 17 | def get_score(self, mem_bank, query, items=None): 18 | bs, h, w, d = query.size() 19 | m, d = mem_bank.size() 20 | score = torch.matmul(query.float(), torch.t(mem_bank).float())# b X h X w X m 21 | score = score.view(bs*h*w, m) # 300x512 22 | score_memory = F.softmax(score,dim=1) # 300x512 23 | 24 | _, top_neg_idx = torch.topk(score_memory, items, dim=1, largest=False) 25 | 26 | neg_logits = 
torch.gather(score, 1, top_neg_idx) 27 | 28 | return neg_logits 29 | 30 | def forward(self, s_query, s_box_feat, mem_s_query, s_value, t_box_feat, t_value, mem_bank): 31 | 32 | batch_size, dim = s_query.shape 33 | mask = torch.eye(batch_size, dtype=torch.float32).cuda() 34 | 35 | anchor_feat = F.normalize(s_query, dim=1) 36 | contrast_feat = F.normalize(mem_s_query, dim=1) 37 | 38 | logits = torch.div(torch.matmul(anchor_feat, contrast_feat.T), self.temperature) 39 | logits_max, _ = torch.max(logits, dim=1, keepdim=True) 40 | sm_logits = logits - logits_max.detach() 41 | 42 | mem_query = s_box_feat.mean(dim=[2, 3]).contiguous().unsqueeze(-1).unsqueeze(-1).permute(0,2,3,1).detach() 43 | sm_neg_logits = self.get_score(mem_bank, mem_query, items=5) 44 | 45 | s_all_logits = torch.exp(torch.cat((sm_logits, sm_neg_logits), dim=1)) 46 | log_prob = sm_logits - torch.log(s_all_logits.sum(1, keepdim=True)) 47 | mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1) 48 | 49 | # loss 50 | loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos 51 | 52 | if torch.isnan(loss.mean()): 53 | loss = loss*0 54 | 55 | return loss.mean() 56 | -------------------------------------------------------------------------------- /dev/packaging/gen_install_table.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # -*- coding: utf-8 -*- 4 | 5 | import argparse 6 | 7 | template = """
install
\
 8 | python -m pip install detectron2{d2_version} -f \\
 9 |   https://dl.fbaipublicfiles.com/detectron2/wheels/{cuda}/torch{torch}/index.html
10 | 
""" 11 | CUDA_SUFFIX = { 12 | "11.3": "cu113", 13 | "11.1": "cu111", 14 | "11.0": "cu110", 15 | "10.2": "cu102", 16 | "10.1": "cu101", 17 | "10.0": "cu100", 18 | "9.2": "cu92", 19 | "cpu": "cpu", 20 | } 21 | 22 | 23 | def gen_header(torch_versions): 24 | return '' + "".join( 25 | [ 26 | ''.format(t) 27 | for t in torch_versions 28 | ] 29 | ) 30 | 31 | 32 | if __name__ == "__main__": 33 | parser = argparse.ArgumentParser() 34 | parser.add_argument("--d2-version", help="detectron2 version number, default to empty") 35 | args = parser.parse_args() 36 | d2_version = f"=={args.d2_version}" if args.d2_version else "" 37 | 38 | all_versions = ( 39 | [("1.8", k) for k in ["11.1", "10.2", "10.1", "cpu"]] 40 | + [("1.9", k) for k in ["11.1", "10.2", "cpu"]] 41 | + [("1.10", k) for k in ["11.3", "11.1", "10.2", "cpu"]] 42 | ) 43 | 44 | torch_versions = sorted( 45 | {k[0] for k in all_versions}, key=lambda x: int(x.split(".")[1]), reverse=True 46 | ) 47 | cuda_versions = sorted( 48 | {k[1] for k in all_versions}, key=lambda x: float(x) if x != "cpu" else 0, reverse=True 49 | ) 50 | 51 | table = gen_header(torch_versions) 52 | for cu in cuda_versions: 53 | table += f""" """ 54 | cu_suffix = CUDA_SUFFIX[cu] 55 | for torch in torch_versions: 56 | if (torch, cu) in all_versions: 57 | cell = template.format(d2_version=d2_version, cuda=cu_suffix, torch=torch) 58 | else: 59 | cell = "" 60 | table += f""" """ 61 | table += "" 62 | table += "
CUDA torch {}
{cu}{cell}
" 63 | print(table) 64 | -------------------------------------------------------------------------------- /dev/parse_results.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # A shell script that parses metrics from the log file. 5 | # Make it easier for developers to track performance of models. 6 | 7 | LOG="$1" 8 | 9 | if [[ -z "$LOG" ]]; then 10 | echo "Usage: $0 /path/to/log/file" 11 | exit 1 12 | fi 13 | 14 | # [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it) 15 | # [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices) 16 | # [12/15 11:49:03] inference INFO: Total inference pure compute time: ..... 17 | 18 | # training time 19 | trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*') 20 | echo "Training speed: $trainspeed s/it" 21 | 22 | # inference time: there could be multiple inference during training 23 | inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1) 24 | echo "Inference speed: $inferencespeed s/it" 25 | 26 | # [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161 27 | memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*') 28 | echo "Training memory: $memory MB" 29 | 30 | echo "Easy to copypaste:" 31 | echo "$trainspeed","$inferencespeed","$memory" 32 | 33 | echo "------------------------------" 34 | 35 | # [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox 36 | # [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl 37 | # 
def subsample_labels(
    labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int
):
    """
    Randomly subsample up to `num_samples` indices from `labels`, a mixture
    of positives and negatives.

    At most ``int(positive_fraction * num_samples)`` positives are taken
    (fewer if not enough exist); the remaining budget is filled with
    negatives, again capped by availability. If both pools are short, fewer
    than `num_samples` indices are returned.

    Args:
        labels (Tensor): (N, ) label vector with values:
            * -1: ignore
            * bg_label: background ("negative") class
            * otherwise: one or more foreground ("positive") classes
        num_samples (int): total sampling budget.
        positive_fraction (float): fraction of the budget reserved for
            positives.
        bg_label (int): label index of the background ("negative") class.

    Returns:
        pos_idx, neg_idx (Tensor): 1D index vectors; combined length is
        `num_samples` or fewer.
    """
    foreground = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
    background = nonzero_tuple(labels == bg_label)[0]

    # Cap positives first by the requested fraction, then by availability.
    pos_quota = min(foreground.numel(), int(num_samples * positive_fraction))
    # The leftover budget goes to negatives, also capped by availability.
    neg_quota = min(background.numel(), num_samples - pos_quota)

    # Take a random subset of each pool via a shuffled permutation.
    pos_perm = torch.randperm(foreground.numel(), device=foreground.device)[:pos_quota]
    neg_perm = torch.randperm(background.numel(), device=background.device)[:neg_quota]

    pos_idx = foreground[pos_perm]
    neg_idx = background[neg_perm]
    return pos_idx, neg_idx
2 | #include "../box_iou_rotated/box_iou_rotated_utils.h" 3 | #include "nms_rotated.h" 4 | 5 | namespace detectron2 { 6 | 7 | template 8 | at::Tensor nms_rotated_cpu_kernel( 9 | const at::Tensor& dets, 10 | const at::Tensor& scores, 11 | const double iou_threshold) { 12 | // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, 13 | // however, the code in this function is much shorter because 14 | // we delegate the IoU computation for rotated boxes to 15 | // the single_box_iou_rotated function in box_iou_rotated_utils.h 16 | AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor"); 17 | AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor"); 18 | AT_ASSERTM( 19 | dets.scalar_type() == scores.scalar_type(), 20 | "dets should have the same type as scores"); 21 | 22 | if (dets.numel() == 0) { 23 | return at::empty({0}, dets.options().dtype(at::kLong)); 24 | } 25 | 26 | auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); 27 | 28 | auto ndets = dets.size(0); 29 | at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); 30 | at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); 31 | 32 | auto suppressed = suppressed_t.data_ptr(); 33 | auto keep = keep_t.data_ptr(); 34 | auto order = order_t.data_ptr(); 35 | 36 | int64_t num_to_keep = 0; 37 | 38 | for (int64_t _i = 0; _i < ndets; _i++) { 39 | auto i = order[_i]; 40 | if (suppressed[i] == 1) { 41 | continue; 42 | } 43 | 44 | keep[num_to_keep++] = i; 45 | 46 | for (int64_t _j = _i + 1; _j < ndets; _j++) { 47 | auto j = order[_j]; 48 | if (suppressed[j] == 1) { 49 | continue; 50 | } 51 | 52 | auto ovr = single_box_iou_rotated( 53 | dets[i].data_ptr(), dets[j].data_ptr()); 54 | if (ovr >= iou_threshold) { 55 | suppressed[j] = 1; 56 | } 57 | } 58 | } 59 | return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); 60 | } 61 | 62 | at::Tensor nms_rotated_cpu( 63 | // input must be contiguous 64 | const at::Tensor& dets, 65 | 
const at::Tensor& scores, 66 | const double iou_threshold) { 67 | auto result = at::empty({0}, dets.options()); 68 | 69 | AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] { 70 | result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); 71 | }); 72 | return result; 73 | } 74 | 75 | } // namespace detectron2 76 | -------------------------------------------------------------------------------- /dev/packaging/pkg_helpers.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # Function to retry functions that sometimes timeout or have flaky failures 5 | retry () { 6 | $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) 7 | } 8 | # Install with pip a bit more robustly than the default 9 | pip_install() { 10 | retry pip install --progress-bar off "$@" 11 | } 12 | 13 | 14 | setup_cuda() { 15 | # Now work out the CUDA settings 16 | # Like other torch domain libraries, we choose common GPU architectures only. 17 | # See https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py 18 | # and https://github.com/pytorch/vision/blob/main/packaging/pkg_helpers.bash for reference. 
19 | export FORCE_CUDA=1 20 | case "$CU_VERSION" in 21 | cu113) 22 | export CUDA_HOME=/usr/local/cuda-11.3/ 23 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" 24 | ;; 25 | cu112) 26 | export CUDA_HOME=/usr/local/cuda-11.2/ 27 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" 28 | ;; 29 | cu111) 30 | export CUDA_HOME=/usr/local/cuda-11.1/ 31 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" 32 | ;; 33 | cu110) 34 | export CUDA_HOME=/usr/local/cuda-11.0/ 35 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0+PTX" 36 | ;; 37 | cu102) 38 | export CUDA_HOME=/usr/local/cuda-10.2/ 39 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" 40 | ;; 41 | cu101) 42 | export CUDA_HOME=/usr/local/cuda-10.1/ 43 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" 44 | ;; 45 | cu100) 46 | export CUDA_HOME=/usr/local/cuda-10.0/ 47 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" 48 | ;; 49 | cu92) 50 | export CUDA_HOME=/usr/local/cuda-9.2/ 51 | export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0+PTX" 52 | ;; 53 | cpu) 54 | unset FORCE_CUDA 55 | export CUDA_VISIBLE_DEVICES= 56 | ;; 57 | *) 58 | echo "Unrecognized CU_VERSION=$CU_VERSION" 59 | exit 1 60 | ;; 61 | esac 62 | } 63 | 64 | setup_wheel_python() { 65 | case "$PYTHON_VERSION" in 66 | 3.6) python_abi=cp36-cp36m ;; 67 | 3.7) python_abi=cp37-cp37m ;; 68 | 3.8) python_abi=cp38-cp38 ;; 69 | 3.9) python_abi=cp39-cp39 ;; 70 | *) 71 | echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION" 72 | exit 1 73 | ;; 74 | esac 75 | export PATH="/opt/python/$python_abi/bin:$PATH" 76 | } 77 | -------------------------------------------------------------------------------- /detectron2/utils/memory.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
@contextmanager
def _ignore_torch_cuda_oom():
    """
    Context manager that swallows pytorch's CUDA out-of-memory RuntimeError;
    every other RuntimeError is re-raised untouched.
    """
    try:
        yield
    except RuntimeError as e:
        # Identified by message substring; pytorch does not expose a
        # dedicated exception type here, and the string may change across
        # releases.
        if "CUDA out of memory. " not in str(e):
            raise


def retry_if_cuda_oom(func):
    """
    Make a function retry itself after encountering pytorch's CUDA OOM error.

    First retry happens after calling `torch.cuda.empty_cache()`. If that
    still fails, a final attempt is made with tensor-like arguments moved to
    CPU; in that case the function is expected to dispatch to a CPU
    implementation, and the return values may be CPU tensors — converting
    them back to CUDA is the caller's responsibility.

    Args:
        func: a stateless callable that takes tensor-like objects as arguments

    Returns:
        a callable which retries `func` if OOM is encountered.

    Examples:
    ::
        output = retry_if_cuda_oom(some_torch_function)(input1, input2)
        # output may be on CPU even if inputs are on GPU

    Note:
        1. When converting inputs to CPU, only top-level arguments exposing
           `.device` and `.to` are converted; nested structures of tensors
           are not supported.

        2. Since the function might be called more than once, it has to be
           stateless.
    """

    def _as_cpu(value):
        # Move only objects that look like CUDA tensors (expose .device and
        # .to); everything else passes through untouched.
        try:
            is_cuda_like = value.device.type == "cuda" and hasattr(value, "to")
        except AttributeError:
            is_cuda_like = False
        return value.to(device="cpu") if is_cuda_like else value

    @wraps(func)
    def wrapped(*args, **kwargs):
        # Attempt 1: run as-is.
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)

        # Attempt 2: free cached blocks, then retry on GPU.
        torch.cuda.empty_cache()
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)

        # Attempt 3: move inputs to CPU. This slows things down a lot,
        # therefore print a notice.
        logger = logging.getLogger(__name__)
        logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
        cpu_args = tuple(_as_cpu(a) for a in args)
        cpu_kwargs = {name: _as_cpu(v) for name, v in kwargs.items()}
        return func(*cpu_args, **cpu_kwargs)

    return wrapped
def verify_results(cfg, results):
    """
    Compare evaluation results against ``cfg.TEST.EXPECTED_RESULTS``.

    Args:
        cfg: config whose ``TEST.EXPECTED_RESULTS`` is a list of
            ``(task, metric, expected, tolerance)`` tuples.
        results (OrderedDict[dict]): task_name -> {metric -> score}

    Returns:
        bool: True when every expected metric exists, is finite, and lies
        within tolerance. On failure the mismatch is logged and the process
        terminates via ``sys.exit(1)`` (it never returns False).
    """
    expected_results = cfg.TEST.EXPECTED_RESULTS
    if not len(expected_results):
        # Nothing configured to verify.
        return True

    ok = True
    for task, metric, expected, tolerance in expected_results:
        actual = results[task].get(metric, None)
        # A missing or non-finite metric always fails verification.
        if actual is None or not np.isfinite(actual):
            ok = False
            continue
        if abs(actual - expected) > tolerance:
            ok = False

    logger = logging.getLogger(__name__)
    if ok:
        logger.info("Results verification passed.")
        return ok

    logger.error("Result verification failed!")
    logger.error("Expected Results: " + str(expected_results))
    logger.error("Actual Results: " + pprint.pformat(results))
    sys.exit(1)
75 | 76 | Args: 77 | results (dict): 78 | """ 79 | r = {} 80 | for k, v in results.items(): 81 | if isinstance(v, Mapping): 82 | v = flatten_results_dict(v) 83 | for kk, vv in v.items(): 84 | r[k + "/" + kk] = vv 85 | else: 86 | r[k] = v 87 | return r 88 | -------------------------------------------------------------------------------- /detectron2/config/instantiate.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | import dataclasses 3 | import logging 4 | from collections import abc 5 | from typing import Any 6 | 7 | from detectron2.utils.registry import _convert_target_to_string, locate 8 | 9 | __all__ = ["dump_dataclass", "instantiate"] 10 | 11 | 12 | def dump_dataclass(obj: Any): 13 | """ 14 | Dump a dataclass recursively into a dict that can be later instantiated. 15 | 16 | Args: 17 | obj: a dataclass object 18 | 19 | Returns: 20 | dict 21 | """ 22 | assert dataclasses.is_dataclass(obj) and not isinstance( 23 | obj, type 24 | ), "dump_dataclass() requires an instance of a dataclass." 25 | ret = {"_target_": _convert_target_to_string(type(obj))} 26 | for f in dataclasses.fields(obj): 27 | v = getattr(obj, f.name) 28 | if dataclasses.is_dataclass(v): 29 | v = dump_dataclass(v) 30 | if isinstance(v, (list, tuple)): 31 | v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] 32 | ret[f.name] = v 33 | return ret 34 | 35 | 36 | def instantiate(cfg): 37 | """ 38 | Recursively instantiate objects defined in dictionaries by 39 | "_target_" and arguments. 
40 | 41 | Args: 42 | cfg: a dict-like object with "_target_" that defines the caller, and 43 | other keys that define the arguments 44 | 45 | Returns: 46 | object instantiated by cfg 47 | """ 48 | from omegaconf import ListConfig 49 | 50 | if isinstance(cfg, ListConfig): 51 | lst = [instantiate(x) for x in cfg] 52 | return ListConfig(lst, flags={"allow_objects": True}) 53 | if isinstance(cfg, list): 54 | # Specialize for list, because many classes take 55 | # list[objects] as arguments, such as ResNet, DatasetMapper 56 | return [instantiate(x) for x in cfg] 57 | 58 | if isinstance(cfg, abc.Mapping) and "_target_" in cfg: 59 | # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, 60 | # but faster: https://github.com/facebookresearch/hydra/issues/1200 61 | cfg = {k: instantiate(v) for k, v in cfg.items()} 62 | cls = cfg.pop("_target_") 63 | cls = instantiate(cls) 64 | 65 | if isinstance(cls, str): 66 | cls_name = cls 67 | cls = locate(cls_name) 68 | assert cls is not None, cls_name 69 | else: 70 | try: 71 | cls_name = cls.__module__ + "." + cls.__qualname__ 72 | except Exception: 73 | # target could be anything, so the above could fail 74 | cls_name = str(cls) 75 | assert callable(cls), f"_target_ {cls} does not define a callable object" 76 | try: 77 | return cls(**cfg) 78 | except TypeError: 79 | logger = logging.getLogger(__name__) 80 | logger.error(f"Error when instantiating {cls_name}!") 81 | raise 82 | return cfg # return as-is if don't know what to do 83 | -------------------------------------------------------------------------------- /detectron2/data/datasets/foggy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 
import numpy as np
import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager

import pdb

__all__ = ["load_foggy_instances", "register_foggy"]


# fmt: off
CLASS_NAMES = ( 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
# fmt: on

def load_foggy_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Foggy detection annotations (Pascal-VOC XML layout) to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one dict per image with "file_name", "image_id",
        "height", "width" and "annotations" (XYXY_ABS boxes).
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24; the
        # alias always meant the builtin str, so use that directly.
        # (The sibling loaders kitti.py/sim10k.py/cityscape.py/cityscape_car.py
        # carry the same defect.)
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense at local

    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []

        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


def register_foggy(name, dirname, split, year, class_names=CLASS_NAMES):
    """Register dataset `name` in DatasetCatalog with matching metadata."""
    DatasetCatalog.register(name, lambda: load_foggy_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )
25 | 26 | Args: 27 | dirname: Contain "Annotations", "ImageSets", "JPEGImages" 28 | split (str): one of "train", "test", "val", "trainval" 29 | class_names: list or tuple of class names 30 | """ 31 | with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: 32 | fileids = np.loadtxt(f, dtype=np.str) 33 | 34 | # Needs to read many small annotation files. Makes sense at local 35 | 36 | annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) 37 | dicts = [] 38 | for fileid in fileids: 39 | anno_file = os.path.join(annotation_dirname, fileid + ".xml") 40 | jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".png") 41 | 42 | with PathManager.open(anno_file) as f: 43 | tree = ET.parse(f) 44 | 45 | r = { 46 | "file_name": jpeg_file, 47 | "image_id": fileid, 48 | "height": int(tree.findall("./size/height")[0].text), 49 | "width": int(tree.findall("./size/width")[0].text), 50 | } 51 | instances = [] 52 | for obj in tree.findall("object"): 53 | cls = obj.find("name").text 54 | # We include "difficult" samples in training. 55 | # Based on limited experiments, they don't hurt accuracy. 56 | # difficult = int(obj.find("difficult").text) 57 | # if difficult == 1: 58 | # continue 59 | bbox = obj.find("bndbox") 60 | bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] 61 | # Original annotations are integers in the range [1, W or H] 62 | # Assuming they mean 1-based pixel indices (inclusive), 63 | # a box with annotation (xmin=1, xmax=W) covers the whole image. 
64 | # In coordinate space this is represented by (xmin=0, xmax=W) 65 | bbox[0] -= 1.0 66 | bbox[1] -= 1.0 67 | 68 | if cls in class_names: 69 | instances.append( 70 | {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} 71 | ) 72 | r["annotations"] = instances 73 | dicts.append(r) 74 | return dicts 75 | 76 | 77 | def register_kitti(name, dirname, split, year, class_names=CLASS_NAMES): 78 | DatasetCatalog.register(name, lambda: load_kitti_instances(dirname, split, class_names)) 79 | MetadataCatalog.get(name).set( 80 | thing_classes=list(class_names), dirname=dirname, year=year, split=split 81 | ) 82 | -------------------------------------------------------------------------------- /detectron2/data/datasets/sim10k.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | import numpy as np 5 | import os 6 | import xml.etree.ElementTree as ET 7 | from typing import List, Tuple, Union 8 | 9 | from detectron2.data import DatasetCatalog, MetadataCatalog 10 | from detectron2.structures import BoxMode 11 | from detectron2.utils.file_io import PathManager 12 | 13 | import pdb 14 | 15 | __all__ = ["load_sim10k_instances", "register_sim10k"] 16 | 17 | 18 | # fmt: off 19 | CLASS_NAMES = ( "car", "background") 20 | # fmt: on 21 | 22 | def load_sim10k_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]): 23 | """ 24 | Load sim10k detection annotations to Detectron2 format. 25 | 26 | Args: 27 | dirname: Contain "Annotations", "ImageSets", "JPEGImages" 28 | split (str): one of "train", "test", "val", "trainval" 29 | class_names: list or tuple of class names 30 | """ 31 | with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: 32 | fileids = np.loadtxt(f, dtype=np.str) 33 | 34 | # Needs to read many small annotation files. 
Makes sense at local 35 | 36 | annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) 37 | dicts = [] 38 | for fileid in fileids: 39 | anno_file = os.path.join(annotation_dirname, fileid + ".xml") 40 | jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") 41 | 42 | with PathManager.open(anno_file) as f: 43 | tree = ET.parse(f) 44 | 45 | r = { 46 | "file_name": jpeg_file, 47 | "image_id": fileid, 48 | "height": int(tree.findall("./size/height")[0].text), 49 | "width": int(tree.findall("./size/width")[0].text), 50 | } 51 | instances = [] 52 | for obj in tree.findall("object"): 53 | cls = obj.find("name").text 54 | # We include "difficult" samples in training. 55 | # Based on limited experiments, they don't hurt accuracy. 56 | # difficult = int(obj.find("difficult").text) 57 | # if difficult == 1: 58 | # continue 59 | bbox = obj.find("bndbox") 60 | bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] 61 | # Original annotations are integers in the range [1, W or H] 62 | # Assuming they mean 1-based pixel indices (inclusive), 63 | # a box with annotation (xmin=1, xmax=W) covers the whole image. 
64 | # In coordinate space this is represented by (xmin=0, xmax=W) 65 | bbox[0] -= 1.0 66 | bbox[1] -= 1.0 67 | 68 | if cls in class_names: 69 | instances.append( 70 | {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} 71 | ) 72 | r["annotations"] = instances 73 | dicts.append(r) 74 | return dicts 75 | 76 | 77 | def register_sim10k(name, dirname, split, year, class_names=CLASS_NAMES): 78 | DatasetCatalog.register(name, lambda: load_sim10k_instances(dirname, split, class_names)) 79 | MetadataCatalog.get(name).set( 80 | thing_classes=list(class_names), dirname=dirname, year=year, split=split 81 | ) 82 | -------------------------------------------------------------------------------- /detectron2/data/datasets/cityscape.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | import numpy as np 5 | import os 6 | import xml.etree.ElementTree as ET 7 | from typing import List, Tuple, Union 8 | 9 | from detectron2.data import DatasetCatalog, MetadataCatalog 10 | from detectron2.structures import BoxMode 11 | from detectron2.utils.file_io import PathManager 12 | 13 | import pdb 14 | 15 | __all__ = ["load_cityscape_instances", "register_cityscape"] 16 | 17 | 18 | # fmt: off 19 | CLASS_NAMES = ( 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle') 20 | # fmt: on 21 | 22 | def load_cityscape_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]): 23 | """ 24 | Load Cityscape detection annotations to Detectron2 format. 
def load_cityscape_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Cityscape detection annotations (VOC-style XML) to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one dict per image in Detectron2's standard dataset format
        ("file_name", "image_id", "height", "width", "annotations").
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # `np.str` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `str` is the supported spelling.
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H].
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image;
            # in coordinate space this is represented by (xmin=0, xmax=W).
            bbox[0] -= 1.0
            bbox[1] -= 1.0

            # Skip categories outside `class_names` instead of crashing on
            # `.index()`; this matches the sibling sim10k / cityscape_car
            # loaders and tolerates extra raw classes in the XML.
            if cls in class_names:
                instances.append(
                    {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
                )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


def register_cityscape(name, dirname, split, year, class_names=CLASS_NAMES):
    """Register a Cityscape split and its metadata in the Detectron2 catalogs."""
    DatasetCatalog.register(name, lambda: load_cityscape_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )
Makes sense at local 35 | 36 | annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) 37 | dicts = [] 38 | for fileid in fileids: 39 | anno_file = os.path.join(annotation_dirname, fileid + ".xml") 40 | jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") 41 | 42 | with PathManager.open(anno_file) as f: 43 | tree = ET.parse(f) 44 | 45 | r = { 46 | "file_name": jpeg_file, 47 | "image_id": fileid, 48 | "height": int(tree.findall("./size/height")[0].text), 49 | "width": int(tree.findall("./size/width")[0].text), 50 | } 51 | instances = [] 52 | for obj in tree.findall("object"): 53 | cls = obj.find("name").text 54 | # We include "difficult" samples in training. 55 | # Based on limited experiments, they don't hurt accuracy. 56 | # difficult = int(obj.find("difficult").text) 57 | # if difficult == 1: 58 | # continue 59 | bbox = obj.find("bndbox") 60 | bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] 61 | # Original annotations are integers in the range [1, W or H] 62 | # Assuming they mean 1-based pixel indices (inclusive), 63 | # a box with annotation (xmin=1, xmax=W) covers the whole image. 
# NOTE: torchvision's RoIAlign has a different default aligned=False
class ROIAlign(nn.Module):
    """RoIAlign layer wrapping ``torchvision.ops.roi_align`` with aligned=True default."""

    def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
        """
        Args:
            output_size (tuple): h, w
            spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of inputs samples to take for each output
                sample. 0 to take samples densely.
            aligned (bool): if False, use the legacy implementation in
                Detectron. If True, align the results more perfectly.

        Note:
            With ``aligned=True`` the ROI is appropriately scaled and then
            shifted by -0.5 before calling roi_align. In our pixel model a
            continuous coordinate c has neighboring pixel indices
            floor(c - 0.5) and ceil(c - 0.5) (e.g. c=1.3 neighbors discrete
            indices [0] and [1], sampled at continuous coordinates 0.5 and
            1.5). The legacy aligned=False mode does not subtract the 0.5 and
            therefore interpolates from slightly misaligned pixels; see
            detectron2/tests/test_roi_align.py for verification. The
            difference does not affect model performance when ROIAlign is
            used together with conv layers.
        """
        super().__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
        self.aligned = aligned

        from torchvision import __version__

        # aligned=True relies on https://github.com/pytorch/vision/pull/2438
        tv_version = tuple(int(part) for part in __version__.split(".")[:2])
        assert tv_version >= (0, 7), "Require torchvision >= 0.7"

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images
            rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
        """
        assert rois.dim() == 2 and rois.size(1) == 5
        if input.is_quantized:
            input = input.dequantize()
        boxes = rois.to(dtype=input.dtype)
        return roi_align(
            input,
            boxes,
            self.output_size,
            self.spatial_scale,
            self.sampling_ratio,
            self.aligned,
        )

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"output_size={self.output_size}"
            f", spatial_scale={self.spatial_scale}"
            f", sampling_ratio={self.sampling_ratio}"
            f", aligned={self.aligned})"
        )
pooled_width, 42 | const int batch_size, 43 | const int channels, 44 | const int height, 45 | const int width, 46 | const int sampling_ratio); 47 | #endif 48 | 49 | // Interface for Python 50 | inline at::Tensor ROIAlignRotated_forward( 51 | const at::Tensor& input, 52 | const at::Tensor& rois, 53 | const float spatial_scale, 54 | const int pooled_height, 55 | const int pooled_width, 56 | const int sampling_ratio) { 57 | if (input.is_cuda()) { 58 | #if defined(WITH_CUDA) || defined(WITH_HIP) 59 | return ROIAlignRotated_forward_cuda( 60 | input, 61 | rois, 62 | spatial_scale, 63 | pooled_height, 64 | pooled_width, 65 | sampling_ratio); 66 | #else 67 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 68 | #endif 69 | } 70 | return ROIAlignRotated_forward_cpu( 71 | input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); 72 | } 73 | 74 | inline at::Tensor ROIAlignRotated_backward( 75 | const at::Tensor& grad, 76 | const at::Tensor& rois, 77 | const float spatial_scale, 78 | const int pooled_height, 79 | const int pooled_width, 80 | const int batch_size, 81 | const int channels, 82 | const int height, 83 | const int width, 84 | const int sampling_ratio) { 85 | if (grad.is_cuda()) { 86 | #if defined(WITH_CUDA) || defined(WITH_HIP) 87 | return ROIAlignRotated_backward_cuda( 88 | grad, 89 | rois, 90 | spatial_scale, 91 | pooled_height, 92 | pooled_width, 93 | batch_size, 94 | channels, 95 | height, 96 | width, 97 | sampling_ratio); 98 | #else 99 | AT_ERROR("Detectron2 is not compiled with GPU support!"); 100 | #endif 101 | } 102 | return ROIAlignRotated_backward_cpu( 103 | grad, 104 | rois, 105 | spatial_scale, 106 | pooled_height, 107 | pooled_width, 108 | batch_size, 109 | channels, 110 | height, 111 | width, 112 | sampling_ratio); 113 | } 114 | 115 | } // namespace detectron2 116 | -------------------------------------------------------------------------------- /detectron2/data/datasets/clipart.py: 
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.

import numpy as np
import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager

import pdb

__all__ = ["load_clipart_instances", "register_clipart"]


# fmt: off
CLASS_NAMES = (
    "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
)
# fmt: on


def load_clipart_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load clipart detection annotations (VOC-style XML) to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one dict per image in Detectron2's standard dataset format.
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # `np.str` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `str` is the supported spelling.
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H].
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image;
            # in coordinate space this is represented by (xmin=0, xmax=W).
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


def register_clipart(name, dirname, split, class_names=CLASS_NAMES):
    """Register a clipart split and its metadata in the Detectron2 catalogs."""
    DatasetCatalog.register(name, lambda: load_clipart_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, split=split
    )
def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Pascal VOC detection annotations to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one dict per image in Detectron2's standard dataset format.
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # `np.str` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `str` is the supported spelling.
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H].
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image;
            # in coordinate space this is represented by (xmin=0, xmax=W).
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
    """Register a Pascal VOC split and its metadata in the Detectron2 catalogs."""
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )
def load_watercolor_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load watercolor detection annotations (VOC-style XML) to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one dict per image in Detectron2's standard dataset format.
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # `np.str` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `str` is the supported spelling.
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H].
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image;
            # in coordinate space this is represented by (xmin=0, xmax=W).
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


def register_watercolor(name, dirname, split, class_names=CLASS_NAMES):
    """Register a watercolor split and its metadata in the Detectron2 catalogs."""
    DatasetCatalog.register(name, lambda: load_watercolor_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, split=split
    )
46 | This method sets all parameters to `requires_grad=False`, 47 | and convert all BatchNorm layers to FrozenBatchNorm 48 | 49 | Returns: 50 | the block itself 51 | """ 52 | for p in self.parameters(): 53 | p.requires_grad = False 54 | FrozenBatchNorm2d.convert_frozen_batchnorm(self) 55 | return self 56 | 57 | 58 | class DepthwiseSeparableConv2d(nn.Module): 59 | """ 60 | A kxk depthwise convolution + a 1x1 convolution. 61 | 62 | In :paper:`xception`, norm & activation are applied on the second conv. 63 | :paper:`mobilenet` uses norm & activation on both convs. 64 | """ 65 | 66 | def __init__( 67 | self, 68 | in_channels, 69 | out_channels, 70 | kernel_size=3, 71 | padding=1, 72 | dilation=1, 73 | *, 74 | norm1=None, 75 | activation1=None, 76 | norm2=None, 77 | activation2=None, 78 | ): 79 | """ 80 | Args: 81 | norm1, norm2 (str or callable): normalization for the two conv layers. 82 | activation1, activation2 (callable(Tensor) -> Tensor): activation 83 | function for the two conv layers. 84 | """ 85 | super().__init__() 86 | self.depthwise = Conv2d( 87 | in_channels, 88 | in_channels, 89 | kernel_size=kernel_size, 90 | padding=padding, 91 | dilation=dilation, 92 | groups=in_channels, 93 | bias=not norm1, 94 | norm=get_norm(norm1, in_channels), 95 | activation=activation1, 96 | ) 97 | self.pointwise = Conv2d( 98 | in_channels, 99 | out_channels, 100 | kernel_size=1, 101 | bias=not norm2, 102 | norm=get_norm(norm2, out_channels), 103 | activation=activation2, 104 | ) 105 | 106 | # default initialization 107 | weight_init.c2_msra_fill(self.depthwise) 108 | weight_init.c2_msra_fill(self.pointwise) 109 | 110 | def forward(self, x): 111 | return self.pointwise(self.depthwise(x)) 112 | -------------------------------------------------------------------------------- /detectron2/layers/roi_align_rotated.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair

from detectron2 import _C


class _ROIAlignRotated(Function):
    """Autograd bridge to the compiled roi_align_rotated forward/backward ops."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        ctx.save_for_backward(roi)
        # Normalize to an (h, w) pair before indexing: the ctx previously
        # stored _pair(output_size) while the kernel call indexed the raw
        # argument, which raised TypeError for a scalar output_size.
        output_size = _pair(output_size)
        ctx.output_size = output_size
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = _C.roi_align_rotated_forward(
            input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (rois,) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_align_rotated_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        # One gradient slot per forward argument; only `input` is differentiable.
        return grad_input, None, None, None, None, None


roi_align_rotated = _ROIAlignRotated.apply


class ROIAlignRotated(nn.Module):
    def __init__(self, output_size, spatial_scale, sampling_ratio):
        """
        Args:
            output_size (tuple): h, w
            spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of inputs samples to take for each output
                sample. 0 to take samples densely.

        Note:
            ROIAlignRotated supports continuous coordinate by default:
            Given a continuous coordinate c, its two neighboring pixel indices (in our
            pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
            c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
            from the underlying signal at continuous coordinates 0.5 and 1.5).
        """
        super(ROIAlignRotated, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images
            rois: Bx6 boxes. First column is the index into N.
                The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
        """
        assert rois.dim() == 2 and rois.size(1) == 6
        orig_dtype = input.dtype
        # The compiled kernel works in fp32; round-trip half-precision inputs.
        if orig_dtype == torch.float16:
            input = input.float()
            rois = rois.float()
        return roi_align_rotated(
            input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        ).to(dtype=orig_dtype)

    def __repr__(self):
        tmpstr = self.__class__.__name__ + "("
        tmpstr += "output_size=" + str(self.output_size)
        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
        tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
        tmpstr += ")"
        return tmpstr
2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace py = pybind11; 11 | 12 | namespace detectron2 { 13 | 14 | namespace COCOeval { 15 | 16 | // Annotation data for a single object instance in an image 17 | struct InstanceAnnotation { 18 | InstanceAnnotation( 19 | uint64_t id, 20 | double score, 21 | double area, 22 | bool is_crowd, 23 | bool ignore) 24 | : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {} 25 | uint64_t id; 26 | double score = 0.; 27 | double area = 0.; 28 | bool is_crowd = false; 29 | bool ignore = false; 30 | }; 31 | 32 | // Stores intermediate results for evaluating detection results for a single 33 | // image that has D detected instances and G ground truth instances. This stores 34 | // matches between detected and ground truth instances 35 | struct ImageEvaluation { 36 | // For each of the D detected instances, the id of the matched ground truth 37 | // instance, or 0 if unmatched 38 | std::vector detection_matches; 39 | 40 | // The detection score of each of the D detected instances 41 | std::vector detection_scores; 42 | 43 | // Marks whether or not each of G instances was ignored from evaluation (e.g., 44 | // because it's outside area_range) 45 | std::vector ground_truth_ignores; 46 | 47 | // Marks whether or not each of D instances was ignored from evaluation (e.g., 48 | // because it's outside aRng) 49 | std::vector detection_ignores; 50 | }; 51 | 52 | template 53 | using ImageCategoryInstances = std::vector>>; 54 | 55 | // C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each 56 | // combination of image, category, area range settings, and IOU thresholds to 57 | // evaluate, it matches detected instances to ground truth instances and stores 58 | // the results into a vector of ImageEvaluation results, which will be 59 | // interpreted by the COCOeval::Accumulate() function to produce precion-recall 60 | // curves. 
The parameters of nested vectors have the following semantics: 61 | // image_category_ious[i][c][d][g] is the intersection over union of the d'th 62 | // detected instance and g'th ground truth instance of 63 | // category category_ids[c] in image image_ids[i] 64 | // image_category_ground_truth_instances[i][c] is a vector of ground truth 65 | // instances in image image_ids[i] of category category_ids[c] 66 | // image_category_detection_instances[i][c] is a vector of detected 67 | // instances in image image_ids[i] of category category_ids[c] 68 | std::vector EvaluateImages( 69 | const std::vector>& area_ranges, // vector of 2-tuples 70 | int max_detections, 71 | const std::vector& iou_thresholds, 72 | const ImageCategoryInstances>& image_category_ious, 73 | const ImageCategoryInstances& 74 | image_category_ground_truth_instances, 75 | const ImageCategoryInstances& 76 | image_category_detection_instances); 77 | 78 | // C++ implementation of COCOeval.accumulate(), which generates precision 79 | // recall curves for each set of category, IOU threshold, detection area range, 80 | // and max number of detections parameters. It is assumed that the parameter 81 | // evaluations is the return value of the functon COCOeval::EvaluateImages(), 82 | // which was called with the same parameter settings params 83 | py::dict Accumulate( 84 | const py::object& params, 85 | const std::vector& evalutations); 86 | 87 | } // namespace COCOeval 88 | } // namespace detectron2 89 | -------------------------------------------------------------------------------- /tools/deploy/README.md: -------------------------------------------------------------------------------- 1 | See [deployment tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html) 2 | for some high-level background about deployment. 3 | 4 | This directory contains the following examples: 5 | 6 | 1. 
An example script `export_model.py` (previously called `caffe2_converter.py`) 7 | that exports a detectron2 model for deployment using different methods and formats. 8 | 9 | 2. A few C++ examples that run inference with Mask R-CNN model in Caffe2/TorchScript format. 10 | 11 | ## Build 12 | All C++ examples depend on libtorch and OpenCV. Some require more dependencies: 13 | 14 | * Running caffe2-format models requires: 15 | * libtorch built with caffe2 inside 16 | * gflags, glog 17 | * protobuf library that matches the version used by PyTorch (version defined in `include/caffe2/proto/caffe2.pb.h` of your PyTorch installation) 18 | * MKL headers if caffe2 is built with MKL 19 | * Running TorchScript-format models produced by `--export-method=caffe2_tracing` requires no other dependencies. 20 | * Running TorchScript-format models produced by `--export-method=tracing` requires libtorchvision (C++ library of torchvision). 21 | 22 | We build all examples with one `CMakeLists.txt` that requires all the above dependencies. 23 | Adjust it if you only need one example. 24 | As a reference, 25 | we provide a [Dockerfile](../../docker/deploy.Dockerfile) that 26 | installs all the above dependencies and builds the C++ examples. 27 | 28 | ## Use 29 | 30 | We show a few example commands to export and execute a Mask R-CNN model in C++. 
31 | 32 | * `export-method=caffe2_tracing, format=caffe2`: 33 | ``` 34 | ./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ 35 | --output ./output --export-method caffe2_tracing --format caffe2 \ 36 | MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ 37 | MODEL.DEVICE cpu 38 | 39 | ./build/caffe2_mask_rcnn --predict_net=output/model.pb --init_net=output/model_init.pb --input=input.jpg 40 | ``` 41 | 42 | * `export-method=caffe2_tracing, format=torchscript`: 43 | 44 | ``` 45 | ./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ 46 | --output ./output --export-method caffe2_tracing --format torchscript \ 47 | MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ 48 | MODEL.DEVICE cpu 49 | 50 | ./build/torchscript_traced_mask_rcnn output/model.ts input.jpg caffe2_tracing 51 | ``` 52 | 53 | * `export-method=tracing, format=torchscript`: 54 | 55 | ``` 56 | # this example also tries GPU instead of CPU 57 | ./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ 58 | --output ./output --export-method tracing --format torchscript \ 59 | MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ 60 | MODEL.DEVICE cuda 61 | 62 | ./build/torchscript_traced_mask_rcnn output/model.ts input.jpg tracing 63 | ``` 64 | 65 | ## Notes: 66 | 67 | 1. Tracing/Caffe2-tracing requires valid weights & sample inputs. 68 | Therefore the above commands require pre-trained models and [COCO dataset](https://detectron2.readthedocs.io/tutorials/builtin_datasets.html). 69 | You can modify the script to obtain sample inputs in other ways instead of from COCO. 70 | 71 | 2. 
`--run-eval` flag can be used under certain modes 72 | (caffe2_tracing with caffe2 format, or tracing with torchscript format) 73 | to evaluate the exported model using the dataset in the config. 74 | It's recommended to always verify the accuracy in case the conversion is not successful. 75 | Evaluation can be slow if model is exported to CPU or dataset is too large ("coco_2017_val_100" is a small subset of COCO useful for evaluation). 76 | Caffe2 accuracy may be slightly different (within 0.1 AP) from original model due to numerical precisions between different runtime. 77 | -------------------------------------------------------------------------------- /detectron2/layers/csrc/vision.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | #include 4 | #include "ROIAlignRotated/ROIAlignRotated.h" 5 | #include "box_iou_rotated/box_iou_rotated.h" 6 | #include "cocoeval/cocoeval.h" 7 | #include "deformable/deform_conv.h" 8 | #include "nms_rotated/nms_rotated.h" 9 | 10 | namespace detectron2 { 11 | 12 | #if defined(WITH_CUDA) || defined(WITH_HIP) 13 | extern int get_cudart_version(); 14 | #endif 15 | 16 | std::string get_cuda_version() { 17 | #if defined(WITH_CUDA) || defined(WITH_HIP) 18 | std::ostringstream oss; 19 | 20 | #if defined(WITH_CUDA) 21 | oss << "CUDA "; 22 | #else 23 | oss << "HIP "; 24 | #endif 25 | 26 | // copied from 27 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 28 | auto printCudaStyleVersion = [&](int v) { 29 | oss << (v / 1000) << "." << (v / 10 % 100); 30 | if (v % 10 != 0) { 31 | oss << "." 
<< (v % 10); 32 | } 33 | }; 34 | printCudaStyleVersion(get_cudart_version()); 35 | return oss.str(); 36 | #else // neither CUDA nor HIP 37 | return std::string("not available"); 38 | #endif 39 | } 40 | 41 | bool has_cuda() { 42 | #if defined(WITH_CUDA) 43 | return true; 44 | #else 45 | return false; 46 | #endif 47 | } 48 | 49 | // similar to 50 | // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp 51 | std::string get_compiler_version() { 52 | std::ostringstream ss; 53 | #if defined(__GNUC__) 54 | #ifndef __clang__ 55 | 56 | #if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) 57 | #error "GCC >= 4.9 is required!" 58 | #endif 59 | 60 | { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } 61 | #endif 62 | #endif 63 | 64 | #if defined(__clang_major__) 65 | { 66 | ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 67 | << __clang_patchlevel__; 68 | } 69 | #endif 70 | 71 | #if defined(_MSC_VER) 72 | { ss << "MSVC " << _MSC_FULL_VER; } 73 | #endif 74 | return ss.str(); 75 | } 76 | 77 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 78 | m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); 79 | m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); 80 | m.def("has_cuda", &has_cuda, "has_cuda"); 81 | 82 | m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes"); 83 | 84 | m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); 85 | m.def( 86 | "deform_conv_backward_input", 87 | &deform_conv_backward_input, 88 | "deform_conv_backward_input"); 89 | m.def( 90 | "deform_conv_backward_filter", 91 | &deform_conv_backward_filter, 92 | "deform_conv_backward_filter"); 93 | m.def( 94 | "modulated_deform_conv_forward", 95 | &modulated_deform_conv_forward, 96 | "modulated_deform_conv_forward"); 97 | m.def( 98 | "modulated_deform_conv_backward", 99 | &modulated_deform_conv_backward, 100 | "modulated_deform_conv_backward"); 101 | 102 | m.def("nms_rotated", &nms_rotated, "NMS for rotated 
boxes"); 103 | 104 | m.def( 105 | "roi_align_rotated_forward", 106 | &ROIAlignRotated_forward, 107 | "Forward pass for Rotated ROI-Align Operator"); 108 | m.def( 109 | "roi_align_rotated_backward", 110 | &ROIAlignRotated_backward, 111 | "Backward pass for Rotated ROI-Align Operator"); 112 | 113 | m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); 114 | m.def( 115 | "COCOevalEvaluateImages", 116 | &COCOeval::EvaluateImages, 117 | "COCOeval::EvaluateImages"); 118 | pybind11::class_(m, "InstanceAnnotation") 119 | .def(pybind11::init()); 120 | pybind11::class_(m, "ImageEvaluation") 121 | .def(pybind11::init<>()); 122 | } 123 | 124 | #ifdef TORCH_LIBRARY 125 | TORCH_LIBRARY(detectron2, m) { 126 | m.def("nms_rotated", &nms_rotated); 127 | } 128 | #endif 129 | } // namespace detectron2 130 | --------------------------------------------------------------------------------