├── .gitignore ├── 5763.mp4 ├── 5763.pdf ├── README.md ├── figs └── overview.png ├── mmdetection3d ├── README.md └── mmdetection3d │ ├── .DS_Store │ ├── checkpoints │ └── .DS_Store │ ├── configs │ ├── .DS_Store │ ├── 3dssd │ │ ├── 3dssd_kitti-3d-car.py │ │ ├── README.md │ │ └── metafile.yml │ ├── _base_ │ │ ├── .DS_Store │ │ ├── datasets │ │ │ ├── .DS_Store │ │ │ ├── coco_instance.py │ │ │ ├── kitti-3d-3class.py │ │ │ ├── kitti-3d-3class_flow.py │ │ │ ├── kitti-3d-car.py │ │ │ ├── lyft-3d.py │ │ │ ├── nuim_instance.py │ │ │ ├── nus-3d.py │ │ │ ├── nus-3d_basic.py │ │ │ ├── nus-3d_flow.py │ │ │ ├── nus-mono3d.py │ │ │ ├── range100_lyft-3d.py │ │ │ ├── s3dis_seg-3d-13class.py │ │ │ ├── scannet-3d-18class.py │ │ │ ├── scannet_seg-3d-20class.py │ │ │ ├── sunrgbd-3d-10class.py │ │ │ ├── waymoD5-3d-3class.py │ │ │ └── waymoD5-3d-car.py │ │ ├── default_runtime.py │ │ ├── models │ │ │ ├── .DS_Store │ │ │ ├── 3dssd.py │ │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ │ ├── centerpoint_01voxel_second_secfpn_nus.py │ │ │ ├── centerpoint_02pillar_second_secfpn_nus.py │ │ │ ├── centerpoint_02pillar_second_secfpn_nus_ours.py │ │ │ ├── fcos3d.py │ │ │ ├── h3dnet.py │ │ │ ├── hv_pointpillars_fpn_lyft.py │ │ │ ├── hv_pointpillars_fpn_nus.py │ │ │ ├── hv_pointpillars_fpn_nus_basic.py │ │ │ ├── hv_pointpillars_fpn_nus_flow.py │ │ │ ├── hv_pointpillars_fpn_range100_lyft.py │ │ │ ├── hv_pointpillars_secfpn_kitti.py │ │ │ ├── hv_pointpillars_secfpn_kitti_flow.py │ │ │ ├── hv_pointpillars_secfpn_waymo.py │ │ │ ├── hv_second_secfpn_kitti.py │ │ │ ├── hv_second_secfpn_waymo.py │ │ │ ├── imvotenet_image.py │ │ │ ├── mask_rcnn_r50_fpn.py │ │ │ ├── parta2.py │ │ │ ├── pointnet2_msg.py │ │ │ ├── pointnet2_ssg.py │ │ │ └── votenet.py │ │ └── schedules │ │ │ ├── cosine.py │ │ │ ├── cyclic_20e.py │ │ │ ├── cyclic_40e.py │ │ │ ├── mmdet_schedule_1x.py │ │ │ ├── schedule_2x.py │ │ │ ├── schedule_3x.py │ │ │ ├── seg_cosine_200e.py │ │ │ └── seg_cosine_50e.py │ ├── benchmark │ │ ├── 
hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py │ │ ├── hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py │ │ ├── hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py │ │ └── hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py │ ├── centerpoint │ │ ├── README.md │ │ ├── centerpoint_0075voxel_second_secfpn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_dcn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_dcn_4x8_cyclic_flip-tta_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_dcn_4x8_cyclic_tta_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_flip-tta_20e_nus.py │ │ ├── centerpoint_01voxel_second_secfpn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_01voxel_second_secfpn_dcn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_02pillar_second_secfpn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_02pillar_second_secfpn_4x8_cyclic_20e_nus_ours.py │ │ ├── centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus.py │ │ ├── centerpoint_02pillar_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py │ │ └── metafile.yml │ ├── dynamic_voxelization │ │ ├── README.md │ │ ├── dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py │ │ ├── dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py │ │ ├── dv_second_secfpn_6x8_80e_kitti-3d-car.py │ │ └── metafile.yml │ ├── fcos3d │ │ ├── README.md │ │ ├── fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py │ │ └── fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py │ ├── fp16 │ │ ├── README.md │ │ ├── hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py │ │ ├── 
hv_pointpillars_regnet-400mf_fpn_sbn-all_fp16_2x8_2x_nus-3d.py │ │ ├── hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d.py │ │ ├── hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py │ │ ├── hv_second_secfpn_fp16_6x8_80e_kitti-3d-car.py │ │ └── metafile.yml │ ├── free_anchor │ │ ├── README.md │ │ ├── hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d.py │ │ ├── hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_strong-aug_4x8_3x_nus-3d.py │ │ ├── hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py │ │ └── metafile.yml │ ├── h3dnet │ │ ├── README.md │ │ ├── h3dnet_3x8_scannet-3d-18class.py │ │ └── metafile.yml │ ├── imvotenet │ │ ├── README.md │ │ ├── imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class.py │ │ ├── imvotenet_stage2_16x8_sunrgbd-3d-10class.py │ │ └── metafile.yml │ ├── mvxnet │ │ ├── README.md │ │ ├── dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py │ │ └── metafile.yml │ ├── nuimages │ │ ├── README.md │ │ ├── cascade_mask_rcnn_r101_fpn_1x_nuim.py │ │ ├── cascade_mask_rcnn_r50_fpn_1x_nuim.py │ │ ├── cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim.py │ │ ├── cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim.py │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim.py │ │ ├── htc_r50_fpn_1x_nuim.py │ │ ├── htc_r50_fpn_coco-20e_1x_nuim.py │ │ ├── htc_r50_fpn_coco-20e_20e_nuim.py │ │ ├── htc_without_semantic_r50_fpn_1x_nuim.py │ │ ├── htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim.py │ │ ├── mask_rcnn_r101_fpn_1x_nuim.py │ │ ├── mask_rcnn_r50_caffe_fpn_1x_nuim.py │ │ ├── mask_rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py │ │ ├── mask_rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py │ │ ├── mask_rcnn_r50_fpn_1x_nuim.py │ │ ├── mask_rcnn_r50_fpn_coco-2x_1x_nuim.py │ │ ├── mask_rcnn_r50_fpn_coco-2x_1x_nus-2d.py │ │ 
├── mask_rcnn_x101_32x4d_fpn_1x_nuim.py │ │ └── metafile.yml │ ├── parta2 │ │ ├── README.md │ │ ├── hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py │ │ ├── hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car.py │ │ └── metafile.yml │ ├── pointnet2 │ │ ├── README.md │ │ ├── metafile.yml │ │ ├── pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class.py │ │ ├── pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class.py │ │ ├── pointnet2_msg_xyz-only_16x2_cosine_250e_scannet_seg-3d-20class.py │ │ ├── pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class.py │ │ ├── pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class.py │ │ └── pointnet2_ssg_xyz-only_16x2_cosine_200e_scannet_seg-3d-20class.py │ ├── pointpillars │ │ ├── .DS_Store │ │ ├── README.md │ │ ├── hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_baseline.py │ │ ├── hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_det.py │ │ ├── hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_flow.py │ │ ├── hv_pointpillars_fpn_sbn-all_range100_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py │ │ ├── hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_flow.py │ │ ├── hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py │ │ ├── hv_pointpillars_secfpn_kitti_det.py │ │ ├── hv_pointpillars_secfpn_sbn-all_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_secfpn_sbn-all_range100_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class.py │ │ ├── hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car.py │ │ ├── hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py │ │ ├── hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car.py │ │ └── metafile.yml │ ├── regnet │ │ ├── README.md │ │ ├── hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d.py │ │ ├── 
hv_pointpillars_regnet-400mf_fpn_sbn-all_range100_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_regnet-400mf_secfpn_sbn-all_2x8_2x_lyft-3d.py │ │ ├── hv_pointpillars_regnet-400mf_secfpn_sbn-all_4x8_2x_nus-3d.py │ │ ├── hv_pointpillars_regnet-400mf_secfpn_sbn-all_range100_2x8_2x_lyft-3d.py │ │ └── metafile.yml │ ├── second │ │ ├── README.md │ │ ├── hv_second_secfpn_6x8_80e_kitti-3d-3class.py │ │ ├── hv_second_secfpn_6x8_80e_kitti-3d-car.py │ │ ├── hv_second_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py │ │ └── metafile.yml │ ├── ssn │ │ ├── README.md │ │ ├── hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d.py │ │ ├── hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d.py │ │ ├── hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d.py │ │ ├── hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d.py │ │ └── metafile.yml │ └── votenet │ │ ├── README.md │ │ ├── metafile.yml │ │ ├── votenet_16x8_sunrgbd-3d-10class.py │ │ ├── votenet_8x8_scannet-3d-18class.py │ │ └── votenet_iouloss_8x8_scannet-3d-18class.py │ ├── mmdet │ ├── .DS_Store │ ├── __init__.py │ ├── apis │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── anchor │ │ │ ├── __init__.py │ │ │ ├── anchor_generator.py │ │ │ ├── builder.py │ │ │ ├── point_generator.py │ │ │ └── utils.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ ├── assign_result.py │ │ │ │ ├── atss_assigner.py │ │ │ │ ├── base_assigner.py │ │ │ │ ├── center_region_assigner.py │ │ │ │ ├── grid_assigner.py │ │ │ │ ├── hungarian_assigner.py │ │ │ │ ├── max_iou_assigner.py │ │ │ │ ├── point_assigner.py │ │ │ │ └── region_assigner.py │ │ │ ├── builder.py │ │ │ ├── coder │ │ │ │ ├── __init__.py │ │ │ │ ├── base_bbox_coder.py │ │ │ │ ├── bucketing_bbox_coder.py │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ ├── tblr_bbox_coder.py │ │ │ │ └── 
yolo_bbox_coder.py │ │ │ ├── demodata.py │ │ │ ├── iou_calculators │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── iou2d_calculator.py │ │ │ ├── match_costs │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── match_cost.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── base_sampler.py │ │ │ │ ├── combined_sampler.py │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ ├── ohem_sampler.py │ │ │ │ ├── pseudo_sampler.py │ │ │ │ ├── random_sampler.py │ │ │ │ ├── sampling_result.py │ │ │ │ └── score_hlr_sampler.py │ │ │ └── transforms.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── bbox_overlaps.py │ │ │ ├── class_names.py │ │ │ ├── eval_hooks.py │ │ │ ├── mean_ap.py │ │ │ └── recall.py │ │ ├── export │ │ │ ├── __init__.py │ │ │ └── pytorch2onnx.py │ │ ├── mask │ │ │ ├── __init__.py │ │ │ ├── mask_target.py │ │ │ ├── structures.py │ │ │ └── utils.py │ │ ├── post_processing │ │ │ ├── __init__.py │ │ │ ├── bbox_nms.py │ │ │ └── merge_augs.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── dist_utils.py │ │ │ └── misc.py │ │ └── visualization │ │ │ ├── __init__.py │ │ │ └── image.py │ ├── datasets │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── cityscapes.py │ │ ├── coco.py │ │ ├── custom.py │ │ ├── dataset_wrappers.py │ │ ├── deepfashion.py │ │ ├── lvis.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── auto_augment.py │ │ │ ├── compose.py │ │ │ ├── formating.py │ │ │ ├── instaboost.py │ │ │ ├── loading.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms.py │ │ ├── samplers │ │ │ ├── __init__.py │ │ │ ├── distributed_sampler.py │ │ │ └── group_sampler.py │ │ ├── utils.py │ │ ├── voc.py │ │ ├── wider_face.py │ │ └── xml_style.py │ ├── models │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── darknet.py │ │ │ ├── detectors_resnet.py │ │ │ ├── detectors_resnext.py │ │ │ ├── hourglass.py │ │ │ ├── hrnet.py │ │ │ ├── regnet.py │ │ │ ├── res2net.py │ │ │ 
├── resnest.py │ │ │ ├── resnet.py │ │ │ ├── resnext.py │ │ │ ├── ssd_vgg.py │ │ │ └── trident_resnet.py │ │ ├── builder.py │ │ ├── dense_heads │ │ │ ├── __init__.py │ │ │ ├── anchor_free_head.py │ │ │ ├── anchor_head.py │ │ │ ├── atss_head.py │ │ │ ├── base_dense_head.py │ │ │ ├── cascade_rpn_head.py │ │ │ ├── centripetal_head.py │ │ │ ├── corner_head.py │ │ │ ├── dense_test_mixins.py │ │ │ ├── embedding_rpn_head.py │ │ │ ├── fcos_head.py │ │ │ ├── fovea_head.py │ │ │ ├── free_anchor_retina_head.py │ │ │ ├── fsaf_head.py │ │ │ ├── ga_retina_head.py │ │ │ ├── ga_rpn_head.py │ │ │ ├── gfl_head.py │ │ │ ├── guided_anchor_head.py │ │ │ ├── ld_head.py │ │ │ ├── nasfcos_head.py │ │ │ ├── paa_head.py │ │ │ ├── pisa_retinanet_head.py │ │ │ ├── pisa_ssd_head.py │ │ │ ├── reppoints_head.py │ │ │ ├── retina_head.py │ │ │ ├── retina_sepbn_head.py │ │ │ ├── rpn_head.py │ │ │ ├── rpn_test_mixin.py │ │ │ ├── sabl_retina_head.py │ │ │ ├── ssd_head.py │ │ │ ├── transformer_head.py │ │ │ ├── vfnet_head.py │ │ │ ├── yolact_head.py │ │ │ └── yolo_head.py │ │ ├── detectors │ │ │ ├── .DS_Store │ │ │ ├── __init__.py │ │ │ ├── atss.py │ │ │ ├── base.py │ │ │ ├── cascade_rcnn.py │ │ │ ├── cornernet.py │ │ │ ├── detr.py │ │ │ ├── fast_rcnn.py │ │ │ ├── faster_rcnn.py │ │ │ ├── fcos.py │ │ │ ├── fovea.py │ │ │ ├── fsaf.py │ │ │ ├── gfl.py │ │ │ ├── grid_rcnn.py │ │ │ ├── htc.py │ │ │ ├── kd_one_stage.py │ │ │ ├── mask_rcnn.py │ │ │ ├── mask_scoring_rcnn.py │ │ │ ├── nasfcos.py │ │ │ ├── paa.py │ │ │ ├── point_rend.py │ │ │ ├── reppoints_detector.py │ │ │ ├── retinanet.py │ │ │ ├── rpn.py │ │ │ ├── scnet.py │ │ │ ├── single_stage.py │ │ │ ├── sparse_rcnn.py │ │ │ ├── trident_faster_rcnn.py │ │ │ ├── two_stage.py │ │ │ ├── vfnet.py │ │ │ ├── yolact.py │ │ │ └── yolo.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── accuracy.py │ │ │ ├── ae_loss.py │ │ │ ├── balanced_l1_loss.py │ │ │ ├── cross_entropy_loss.py │ │ │ ├── focal_loss.py │ │ │ ├── gaussian_focal_loss.py │ │ │ ├── gfocal_loss.py │ │ 
│ ├── ghm_loss.py │ │ │ ├── iou_loss.py │ │ │ ├── kd_loss.py │ │ │ ├── mse_loss.py │ │ │ ├── pisa_loss.py │ │ │ ├── smooth_l1_loss.py │ │ │ ├── utils.py │ │ │ └── varifocal_loss.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ ├── bfp.py │ │ │ ├── channel_mapper.py │ │ │ ├── fpg.py │ │ │ ├── fpn.py │ │ │ ├── fpn_carafe.py │ │ │ ├── hrfpn.py │ │ │ ├── nas_fpn.py │ │ │ ├── nasfcos_fpn.py │ │ │ ├── pafpn.py │ │ │ ├── rfp.py │ │ │ └── yolo_neck.py │ │ ├── roi_heads │ │ │ ├── __init__.py │ │ │ ├── base_roi_head.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── bbox_head.py │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ ├── dii_head.py │ │ │ │ ├── double_bbox_head.py │ │ │ │ ├── sabl_head.py │ │ │ │ └── scnet_bbox_head.py │ │ │ ├── cascade_roi_head.py │ │ │ ├── double_roi_head.py │ │ │ ├── dynamic_roi_head.py │ │ │ ├── grid_roi_head.py │ │ │ ├── htc_roi_head.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── coarse_mask_head.py │ │ │ │ ├── fcn_mask_head.py │ │ │ │ ├── feature_relay_head.py │ │ │ │ ├── fused_semantic_head.py │ │ │ │ ├── global_context_head.py │ │ │ │ ├── grid_head.py │ │ │ │ ├── htc_mask_head.py │ │ │ │ ├── mask_point_head.py │ │ │ │ ├── maskiou_head.py │ │ │ │ ├── scnet_mask_head.py │ │ │ │ └── scnet_semantic_head.py │ │ │ ├── mask_scoring_roi_head.py │ │ │ ├── pisa_roi_head.py │ │ │ ├── point_rend_roi_head.py │ │ │ ├── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ ├── base_roi_extractor.py │ │ │ │ ├── generic_roi_extractor.py │ │ │ │ └── single_level_roi_extractor.py │ │ │ ├── scnet_roi_head.py │ │ │ ├── shared_heads │ │ │ │ ├── __init__.py │ │ │ │ └── res_layer.py │ │ │ ├── sparse_roi_head.py │ │ │ ├── standard_roi_head.py │ │ │ ├── test_mixins.py │ │ │ └── trident_roi_head.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ ├── gaussian_target.py │ │ │ ├── positional_encoding.py │ │ │ ├── res_layer.py │ │ │ └── transformer.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ ├── contextmanagers.py │ │ ├── logger.py │ │ 
├── profiling.py │ │ ├── util_mixins.py │ │ └── util_random.py │ └── version.py │ ├── mmdet3d │ ├── .DS_Store │ ├── __init__.py │ ├── apis │ │ ├── __init__.py │ │ ├── inference.py │ │ ├── test.py │ │ └── train.py │ ├── core │ │ ├── __init__.py │ │ ├── anchor │ │ │ ├── __init__.py │ │ │ └── anchor_3d_generator.py │ │ ├── bbox │ │ │ ├── __init__.py │ │ │ ├── assigners │ │ │ │ └── __init__.py │ │ │ ├── box_np_ops.py │ │ │ ├── coders │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor_free_bbox_coder.py │ │ │ │ ├── centerpoint_bbox_coders.py │ │ │ │ ├── delta_xyzwhlr_bbox_coder.py │ │ │ │ └── partial_bin_based_bbox_coder.py │ │ │ ├── iou_calculators │ │ │ │ ├── __init__.py │ │ │ │ └── iou3d_calculator.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ └── iou_neg_piecewise_sampler.py │ │ │ ├── structures │ │ │ │ ├── __init__.py │ │ │ │ ├── base_box3d.py │ │ │ │ ├── box_3d_mode.py │ │ │ │ ├── cam_box3d.py │ │ │ │ ├── coord_3d_mode.py │ │ │ │ ├── depth_box3d.py │ │ │ │ ├── lidar_box3d.py │ │ │ │ └── utils.py │ │ │ └── transforms.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── indoor_eval.py │ │ │ ├── kitti_utils │ │ │ │ ├── __init__.py │ │ │ │ ├── eval.py │ │ │ │ └── rotate_iou.py │ │ │ ├── lyft_eval.py │ │ │ ├── seg_eval.py │ │ │ └── waymo_utils │ │ │ │ └── prediction_kitti_to_waymo.py │ │ ├── points │ │ │ ├── __init__.py │ │ │ ├── base_points.py │ │ │ ├── cam_points.py │ │ │ ├── depth_points.py │ │ │ └── lidar_points.py │ │ ├── post_processing │ │ │ ├── __init__.py │ │ │ ├── box3d_nms.py │ │ │ └── merge_augs.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── gaussian.py │ │ ├── visualizer │ │ │ ├── __init__.py │ │ │ ├── image_vis.py │ │ │ ├── open3d_vis.py │ │ │ └── show_result.py │ │ └── voxel │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ └── voxel_generator.py │ ├── datasets │ │ ├── __init__.py │ │ ├── builder.py │ │ ├── custom_3d.py │ │ ├── custom_3d_seg.py │ │ ├── dataset_wrappers.py │ │ ├── kitti2d_dataset.py │ │ ├── kitti_dataset.py │ │ ├── kitti_mono_dataset.py 
│ │ ├── lyft_dataset.py │ │ ├── nuscenes_dataset.py │ │ ├── nuscenes_mono_dataset.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ ├── data_augment_utils.py │ │ │ ├── dbsampler.py │ │ │ ├── formating.py │ │ │ ├── loading.py │ │ │ ├── test_time_aug.py │ │ │ └── transforms_3d.py │ │ ├── s3dis_dataset.py │ │ ├── scannet_dataset.py │ │ ├── semantickitti_dataset.py │ │ ├── sunrgbd_dataset.py │ │ ├── utils.py │ │ └── waymo_dataset.py │ ├── models │ │ ├── .DS_Store │ │ ├── __init__.py │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── base_pointnet.py │ │ │ ├── multi_backbone.py │ │ │ ├── nostem_regnet.py │ │ │ ├── pointnet2_sa_msg.py │ │ │ ├── pointnet2_sa_ssg.py │ │ │ └── second.py │ │ ├── builder.py │ │ ├── decode_heads │ │ │ ├── __init__.py │ │ │ ├── decode_head.py │ │ │ └── pointnet2_head.py │ │ ├── dense_heads │ │ │ ├── __init__.py │ │ │ ├── anchor3d_head.py │ │ │ ├── anchor_free_mono3d_head.py │ │ │ ├── base_conv_bbox_head.py │ │ │ ├── base_mono3d_dense_head.py │ │ │ ├── centerpoint_head.py │ │ │ ├── fcos_mono3d_head.py │ │ │ ├── free_anchor3d_head.py │ │ │ ├── parta2_rpn_head.py │ │ │ ├── shape_aware_head.py │ │ │ ├── ssd_3d_head.py │ │ │ ├── train_mixins.py │ │ │ └── vote_head.py │ │ ├── detectors │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── centerpoint.py │ │ │ ├── dynamic_voxelnet.py │ │ │ ├── fcos_mono3d.py │ │ │ ├── h3dnet.py │ │ │ ├── imvotenet.py │ │ │ ├── mvx_faster_rcnn.py │ │ │ ├── mvx_two_stage.py │ │ │ ├── mvx_two_stage_org.py │ │ │ ├── parta2.py │ │ │ ├── single_stage.py │ │ │ ├── single_stage_mono3d.py │ │ │ ├── ssd3dnet.py │ │ │ ├── two_stage.py │ │ │ ├── votenet.py │ │ │ └── voxelnet.py │ │ ├── flow_head │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── flow_embedding.py │ │ │ ├── flownet3d.py │ │ │ ├── flownet3d_head.py │ │ │ ├── flownet3d_head_org.py │ │ │ ├── group.py │ │ │ ├── load_weights.py │ │ │ ├── pointnet_featprop.py │ │ │ ├── pointnet_setconv.py │ │ │ ├── pointnet_setupconv.py │ │ │ └── sample.py │ │ ├── fusion_layers │ │ │ ├── 
__init__.py │ │ │ ├── coord_transform.py │ │ │ ├── point_fusion.py │ │ │ └── vote_fusion.py │ │ ├── losses │ │ │ ├── __init__.py │ │ │ ├── axis_aligned_iou_loss.py │ │ │ ├── chamfer_distance.py │ │ │ └── cycle_loss.py │ │ ├── middle_encoders │ │ │ ├── __init__.py │ │ │ ├── pillar_scatter.py │ │ │ ├── sparse_encoder.py │ │ │ └── sparse_unet.py │ │ ├── model_utils │ │ │ ├── __init__.py │ │ │ └── vote_module.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ └── second_fpn.py │ │ ├── roi_heads │ │ │ ├── __init__.py │ │ │ ├── base_3droi_head.py │ │ │ ├── bbox_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── h3d_bbox_head.py │ │ │ │ └── parta2_bbox_head.py │ │ │ ├── h3d_roi_head.py │ │ │ ├── mask_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── pointwise_semantic_head.py │ │ │ │ └── primitive_head.py │ │ │ ├── part_aggregation_roi_head.py │ │ │ └── roi_extractors │ │ │ │ ├── __init__.py │ │ │ │ └── single_roiaware_extractor.py │ │ ├── segmentors │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ └── encoder_decoder.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── clip_sigmoid.py │ │ │ └── mlp.py │ │ └── voxel_encoders │ │ │ ├── .DS_Store │ │ │ ├── __init__.py │ │ │ ├── pillar_encoder.py │ │ │ ├── utils.py │ │ │ ├── utils_flow_full.py │ │ │ ├── utils_flow_unfull.py │ │ │ ├── voxel_encoder.py │ │ │ ├── voxel_encoder_flow.py │ │ │ └── voxel_encoder_org.py │ ├── ops │ │ ├── __init__.py │ │ ├── ball_query │ │ │ ├── __init__.py │ │ │ ├── ball_query.py │ │ │ ├── ball_query_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ └── src │ │ │ │ ├── ball_query.cpp │ │ │ │ └── ball_query_cuda.cu │ │ ├── furthest_point_sample │ │ │ ├── __init__.py │ │ │ ├── furthest_point_sample.py │ │ │ ├── furthest_point_sample_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── points_sampler.py │ │ │ ├── src │ │ │ │ ├── furthest_point_sample.cpp │ │ │ │ └── furthest_point_sample_cuda.cu │ │ │ └── utils.py │ │ ├── gather_points │ │ │ ├── __init__.py │ │ │ ├── gather_points.py │ │ │ ├── gather_points_ext.cpython-37m-x86_64-linux-gnu.so │ │ 
│ └── src │ │ │ │ ├── gather_points.cpp │ │ │ │ └── gather_points_cuda.cu │ │ ├── group_points │ │ │ ├── __init__.py │ │ │ ├── group_points.py │ │ │ ├── group_points_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ └── src │ │ │ │ ├── group_points.cpp │ │ │ │ └── group_points_cuda.cu │ │ ├── interpolate │ │ │ ├── __init__.py │ │ │ ├── interpolate_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── src │ │ │ │ ├── interpolate.cpp │ │ │ │ ├── three_interpolate_cuda.cu │ │ │ │ └── three_nn_cuda.cu │ │ │ ├── three_interpolate.py │ │ │ └── three_nn.py │ │ ├── iou3d │ │ │ ├── __init__.py │ │ │ ├── iou3d_cuda.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── iou3d_utils.py │ │ │ └── src │ │ │ │ ├── iou3d.cpp │ │ │ │ └── iou3d_kernel.cu │ │ ├── knn │ │ │ ├── __init__.py │ │ │ ├── knn.py │ │ │ ├── knn_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ └── src │ │ │ │ ├── knn.cpp │ │ │ │ └── knn_cuda.cu │ │ ├── norm.py │ │ ├── pointnet_modules │ │ │ ├── __init__.py │ │ │ ├── builder.py │ │ │ ├── point_fp_module.py │ │ │ └── point_sa_module.py │ │ ├── roiaware_pool3d │ │ │ ├── __init__.py │ │ │ ├── points_in_boxes.py │ │ │ ├── roiaware_pool3d.py │ │ │ ├── roiaware_pool3d_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ └── src │ │ │ │ ├── points_in_boxes_cpu.cpp │ │ │ │ ├── points_in_boxes_cuda.cu │ │ │ │ ├── roiaware_pool3d.cpp │ │ │ │ └── roiaware_pool3d_kernel.cu │ │ ├── sparse_block.py │ │ ├── spconv │ │ │ ├── __init__.py │ │ │ ├── conv.py │ │ │ ├── functional.py │ │ │ ├── include │ │ │ │ ├── paramsgrid.h │ │ │ │ ├── prettyprint.h │ │ │ │ ├── pybind11_utils.h │ │ │ │ ├── spconv │ │ │ │ │ ├── fused_spconv_ops.h │ │ │ │ │ ├── geometry.h │ │ │ │ │ ├── indice.cu.h │ │ │ │ │ ├── indice.h │ │ │ │ │ ├── maxpool.h │ │ │ │ │ ├── mp_helper.h │ │ │ │ │ ├── point2voxel.h │ │ │ │ │ ├── pool_ops.h │ │ │ │ │ ├── reordering.cu.h │ │ │ │ │ ├── reordering.h │ │ │ │ │ └── spconv_ops.h │ │ │ │ ├── tensorview │ │ │ │ │ ├── helper_kernel.cu.h │ │ │ │ │ ├── helper_launch.h │ │ │ │ │ └── tensorview.h │ │ │ │ ├── torch_utils.h │ │ │ 
│ └── utility │ │ │ │ │ └── timer.h │ │ │ ├── modules.py │ │ │ ├── ops.py │ │ │ ├── pool.py │ │ │ ├── sparse_conv_ext.cpython-37m-x86_64-linux-gnu.so │ │ │ ├── src │ │ │ │ ├── all.cc │ │ │ │ ├── indice.cc │ │ │ │ ├── indice_cuda.cu │ │ │ │ ├── maxpool.cc │ │ │ │ ├── maxpool_cuda.cu │ │ │ │ ├── reordering.cc │ │ │ │ └── reordering_cuda.cu │ │ │ ├── structure.py │ │ │ └── test_utils.py │ │ └── voxel │ │ │ ├── __init__.py │ │ │ ├── scatter_points.py │ │ │ ├── src │ │ │ ├── scatter_points_cpu.cpp │ │ │ ├── scatter_points_cuda.cu │ │ │ ├── voxelization.cpp │ │ │ ├── voxelization.h │ │ │ ├── voxelization_cpu.cpp │ │ │ └── voxelization_cuda.cu │ │ │ ├── voxel_layer.cpython-37m-x86_64-linux-gnu.so │ │ │ └── voxelize.py │ ├── utils │ │ ├── __init__.py │ │ ├── collect_env.py │ │ └── logger.py │ └── version.py │ └── tools │ ├── .DS_Store │ ├── analysis_tools │ ├── analyze_logs.py │ └── benchmark.py │ ├── create_data.py │ ├── create_data.sh │ ├── data_converter │ ├── __init__.py │ ├── create_gt_database.py │ ├── indoor_converter.py │ ├── kitti_converter.py │ ├── kitti_data_utils.py │ ├── lyft_converter.py │ ├── lyft_data_fixer.py │ ├── nuimage_converter.py │ ├── nuscenes_converter.py │ ├── s3dis_data_utils.py │ ├── scannet_data_utils.py │ ├── sunrgbd_data_utils.py │ └── waymo_converter.py │ ├── dist_test.sh │ ├── dist_train.sh │ ├── misc │ ├── browse_dataset.py │ ├── fuse_conv_bn.py │ ├── print_config.py │ └── visualize_results.py │ ├── model_converters │ ├── convert_votenet_checkpoints.py │ ├── publish_model.py │ └── regnet2mmdet.py │ ├── slurm_test.sh │ ├── slurm_train.sh │ ├── subsample.py │ ├── test.py │ ├── test_our_centerpoint_nus.sh │ ├── test_our_pointpillars_kitti.sh │ ├── test_our_pointpillars_nus.sh │ ├── test_our_ssn_nus.sh │ └── train.py └── pointgnn ├── README.md ├── checkpoints ├── detection_after_flow_step2 │ ├── checkpoint │ ├── config │ └── train_config ├── detection_after_flow_step4 │ ├── checkpoint │ ├── config │ └── train_config ├── detection_baseline │ 
├── checkpoint │ ├── config │ └── train_config ├── flow_pretrained_sim_data │ └── checkpoint ├── selfsupervised_flow_step1 │ ├── checkpoint │ ├── config │ └── train_config └── selfsupervised_flow_step3 │ ├── checkpoint │ ├── config │ └── train_config ├── configs ├── detection_after_flow_step2 │ ├── car_auto_T3_train_config │ └── car_auto_T3_train_train_config ├── detection_after_flow_step4 │ ├── car_auto_T3_train_config │ └── car_auto_T3_train_train_config ├── detection_baseline │ ├── car_auto_T3_train_config │ └── car_auto_T3_train_train_config ├── selfsupervised_flow_step1 │ ├── car_auto_T3_train_config │ └── car_auto_T3_train_train_config └── selfsupervised_flow_step3 │ ├── car_auto_T3_train_config │ └── car_auto_T3_train_train_config ├── dataset ├── README.md ├── kitti_dataset.py └── tracking2pointgnn │ ├── kitti_util.py │ ├── lstm_seq_data.py │ ├── move_tracking2detection.py │ ├── move_tracking2detection.sh │ ├── tracking2object.py │ └── tracking2object.sh ├── docker ├── Dockerfile ├── build_image.sh ├── requirements.txt └── run_docker.sh ├── eval.py ├── eval_script └── evaluate_object_3d_offline ├── flow ├── make_tf_ops.sh ├── old2new_ckpt.py ├── src │ ├── __init__.py │ ├── model_concat_upsa_cycle.py │ ├── tf_ops │ │ ├── 3d_interpolation │ │ │ ├── __init__.py │ │ │ ├── interpolate.cpp │ │ │ ├── tf_interpolate.cpp │ │ │ ├── tf_interpolate.py │ │ │ ├── tf_interpolate_compile.sh │ │ │ ├── tf_interpolate_op_test.py │ │ │ ├── tf_interpolate_so.so │ │ │ └── visu_interpolation.py │ │ ├── __init__.py │ │ ├── grouping │ │ │ ├── __init__.py │ │ │ ├── compile.sh │ │ │ ├── query_ball_point │ │ │ ├── query_ball_point.cpp │ │ │ ├── query_ball_point.cu │ │ │ ├── query_ball_point_block │ │ │ ├── query_ball_point_block.cu │ │ │ ├── query_ball_point_cuda │ │ │ ├── query_ball_point_grid │ │ │ ├── query_ball_point_grid.cu │ │ │ ├── selection_sort │ │ │ ├── selection_sort.cpp │ │ │ ├── selection_sort.cu │ │ │ ├── selection_sort_const.cu │ │ │ ├── selection_sort_cuda │ │ │ ├── 
test_knn.py │ │ │ ├── tf_grouping.cpp │ │ │ ├── tf_grouping.py │ │ │ ├── tf_grouping_compile.sh │ │ │ ├── tf_grouping_g.cu │ │ │ ├── tf_grouping_g.cu.o │ │ │ ├── tf_grouping_op_test.py │ │ │ └── tf_grouping_so.so │ │ └── sampling │ │ │ ├── __init__.py │ │ │ ├── tf_sampling.cpp │ │ │ ├── tf_sampling.py │ │ │ ├── tf_sampling_compile.sh │ │ │ ├── tf_sampling_g.cu │ │ │ ├── tf_sampling_g.cu.o │ │ │ └── tf_sampling_so.so │ └── utils │ │ ├── __init__.py │ │ ├── pointnet_util.py │ │ ├── prepare_split_file_supervised.py │ │ └── tf_util.py └── visualization.py ├── kitti_native_evaluation ├── CMakeFiles │ ├── 3.10.2 │ │ ├── CMakeCCompiler.cmake │ │ ├── CMakeCXXCompiler.cmake │ │ ├── CMakeDetermineCompilerABI_C.bin │ │ ├── CMakeDetermineCompilerABI_CXX.bin │ │ ├── CMakeSystem.cmake │ │ ├── CompilerIdC │ │ │ ├── CMakeCCompilerId.c │ │ │ └── a.out │ │ └── CompilerIdCXX │ │ │ ├── CMakeCXXCompilerId.cpp │ │ │ └── a.out │ ├── Makefile.cmake │ ├── cmake.check_cache │ ├── evaluate_object_3d_offline.dir │ │ ├── cmake_clean.cmake │ │ ├── depend.make │ │ ├── link.txt │ │ ├── progress.make │ │ └── src │ │ │ └── evaluate_object_3d_offline.cpp.o │ ├── feature_tests.bin │ ├── feature_tests.c │ ├── feature_tests.cxx │ └── progress.marks ├── CMakeLists.txt ├── README.md ├── evaluate_object_3d_offline ├── include │ └── mail.h └── src │ └── evaluate_object_3d_offline.cpp ├── models ├── box_encoding.py ├── crop_aug.py ├── flow_model_utils.py ├── gnn.py ├── graph_gen.py ├── loss.py ├── models.py ├── nms.py ├── preprocess.py └── var_list.py ├── run.py ├── sample_train_data.py ├── scripts └── point_cloud_downsample.py ├── splits ├── train.txt ├── train_5.txt ├── train_car.txt ├── train_pedestrian_cyclist.txt ├── trainval_car.txt ├── trainval_pedestrian_cyclist.txt └── val.txt ├── test_on_val.sh ├── train.py ├── train.sh └── util ├── config_util.py ├── summary_util.py └── tf_util.py /.gitignore: -------------------------------------------------------------------------------- 1 | 
model.data-00000-of-00001 2 | model.index 3 | model.meta 4 | *.pth 5 | *.pyc 6 | -------------------------------------------------------------------------------- /5763.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/5763.mp4 -------------------------------------------------------------------------------- /5763.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/5763.pdf -------------------------------------------------------------------------------- /figs/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/figs/overview.png -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/checkpoints/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/checkpoints/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/configs/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/3dssd/metafile.yml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: 3DSSD 3 | Metadata: 4 | Training Data: KITTI 5 | Training Techniques: 6 | - AdamW 7 | Training Resources: 4x TITAN X 8 | Architecture: 9 | - PointNet++ 10 | Paper: https://arxiv.org/abs/2002.10187 11 | README: configs/3dssd/README.md 12 | 13 | Models: 14 | - Name: 3dssd_kitti-3d-car 15 | In Collection: 3DSSD 16 | Config: configs/3dssd/3dssd_kitti-3d-car.py 17 | Metadata: 18 | Training Memory (GB): 4.7 19 | Results: 20 | - Task: 3D Object Detection 21 | Dataset: KITTI 22 | Metrics: 23 | mAP: 78.39 24 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/3dssd/3dssd_kitti-3d-car_20210324_122002-07e9a19b.pth 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/configs/_base_/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/datasets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/configs/_base_/datasets/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/default_runtime.py: 
-------------------------------------------------------------------------------- 1 | checkpoint_config = dict(interval=1) 2 | # yapf:disable push 3 | # By default we use textlogger hook and tensorboard 4 | # For more loggers see 5 | # https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook 6 | log_config = dict( 7 | interval=50, 8 | hooks=[ 9 | dict(type='TextLoggerHook'), 10 | dict(type='TensorboardLoggerHook') 11 | ]) 12 | # yapf:enable 13 | dist_params = dict(backend='nccl') 14 | log_level = 'INFO' 15 | work_dir = None 16 | load_from = None 17 | resume_from = None 18 | workflow = [('train', 1)] 19 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/models/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/configs/_base_/models/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/models/hv_pointpillars_fpn_lyft.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_pointpillars_fpn_nus.py' 2 | 3 | # model settings (based on nuScenes model settings) 4 | # Voxel size for voxel encoder 5 | # Usually voxel size is changed consistently with the point cloud range 6 | # If point cloud range is modified, do remember to change all related 7 | # keys in the config. 
8 | model = dict( 9 | pts_voxel_layer=dict( 10 | max_num_points=20, 11 | point_cloud_range=[-80, -80, -5, 80, 80, 3], 12 | max_voxels=(60000, 60000)), 13 | pts_voxel_encoder=dict( 14 | feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]), 15 | pts_middle_encoder=dict(output_shape=[640, 640]), 16 | pts_bbox_head=dict( 17 | num_classes=9, 18 | anchor_generator=dict( 19 | ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[]), 20 | bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), 21 | # model training settings (based on nuScenes model settings) 22 | train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) 23 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_pointpillars_fpn_nus.py' 2 | 3 | # model settings (based on nuScenes model settings) 4 | # Voxel size for voxel encoder 5 | # Usually voxel size is changed consistently with the point cloud range 6 | # If point cloud range is modified, do remember to change all related 7 | # keys in the config. 
8 | model = dict( 9 | pts_voxel_layer=dict( 10 | max_num_points=20, 11 | point_cloud_range=[-100, -100, -5, 100, 100, 3], 12 | max_voxels=(60000, 60000)), 13 | pts_voxel_encoder=dict( 14 | feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]), 15 | pts_middle_encoder=dict(output_shape=[800, 800]), 16 | pts_bbox_head=dict( 17 | num_classes=9, 18 | anchor_generator=dict( 19 | ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]), 20 | bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), 21 | # model training settings (based on nuScenes model settings) 22 | train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) 23 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/models/pointnet2_msg.py: -------------------------------------------------------------------------------- 1 | _base_ = './pointnet2_ssg.py' 2 | 3 | # model settings 4 | model = dict( 5 | backbone=dict( 6 | _delete_=True, 7 | type='PointNet2SAMSG', 8 | in_channels=6, # [xyz, rgb], should be modified with dataset 9 | num_points=(1024, 256, 64, 16), 10 | radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), 11 | num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), 12 | sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, 13 | 128)), 14 | ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), 15 | (256, 384, 512))), 16 | aggregation_channels=(None, None, None, None), 17 | fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), 18 | fps_sample_range_lists=((-1), (-1), (-1), (-1)), 19 | dilated_group=(False, False, False, False), 20 | out_indices=(0, 1, 2, 3), 21 | sa_cfg=dict( 22 | type='PointSAModuleMSG', 23 | pool_mod='max', 24 | use_xyz=True, 25 | normalize_xyz=False)), 26 | decode_head=dict( 27 | fp_channels=((1536, 256, 256), (512, 256, 256), (352, 256, 128), 28 | (128, 128, 128, 128)))) 29 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/models/pointnet2_ssg.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | model = dict( 3 | type='EncoderDecoder3D', 4 | backbone=dict( 5 | type='PointNet2SASSG', 6 | in_channels=6, # [xyz, rgb], should be modified with dataset 7 | num_points=(1024, 256, 64, 16), 8 | radius=(0.1, 0.2, 0.4, 0.8), 9 | num_samples=(32, 32, 32, 32), 10 | sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, 11 | 512)), 12 | fp_channels=(), 13 | norm_cfg=dict(type='BN2d'), 14 | sa_cfg=dict( 15 | type='PointSAModule', 16 | pool_mod='max', 17 | use_xyz=True, 18 | normalize_xyz=False)), 19 | decode_head=dict( 20 | type='PointNet2Head', 21 | fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), 22 | (128, 128, 128, 128)), 23 | channels=128, 24 | dropout_ratio=0.5, 25 | conv_cfg=dict(type='Conv1d'), 26 | norm_cfg=dict(type='BN1d'), 27 | act_cfg=dict(type='ReLU'), 28 | loss_decode=dict( 29 | type='CrossEntropyLoss', 30 | use_sigmoid=False, 31 | class_weight=None, # should be modified with dataset 32 | loss_weight=1.0)), 33 | # model training and testing settings 34 | train_cfg=dict(), 35 | test_cfg=dict(mode='slide')) 36 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/cosine.py: -------------------------------------------------------------------------------- 1 | # This schedule is mainly used by models with dynamic voxelization 2 | # optimizer 3 | lr = 0.003 # max learning rate 4 | optimizer = dict( 5 | type='AdamW', 6 | lr=lr, 7 | betas=(0.95, 0.99), # the momentum is change during training 8 | weight_decay=0.001) 9 | optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) 10 | 11 | lr_config = dict( 12 | policy='CosineAnnealing', 13 | warmup='linear', 14 | warmup_iters=1000, 15 | 
warmup_ratio=1.0 / 10, 16 | min_lr_ratio=1e-5) 17 | 18 | momentum_config = None 19 | 20 | runner = dict(type='EpochBasedRunner', max_epochs=40) 21 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/cyclic_20e.py: -------------------------------------------------------------------------------- 1 | # For nuScenes dataset, we usually evaluate the model at the end of training. 2 | # Since the models are trained by 24 epochs by default, we set evaluation 3 | # interval to be 20. Please change the interval accordingly if you do not 4 | # use a default schedule. 5 | # optimizer 6 | # This schedule is mainly used by models on nuScenes dataset 7 | optimizer = dict(type='AdamW', lr=1e-4, weight_decay=0.01) 8 | # max_norm=10 is better for SECOND 9 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 10 | lr_config = dict( 11 | policy='cyclic', 12 | target_ratio=(10, 1e-4), 13 | cyclic_times=1, 14 | step_ratio_up=0.4, 15 | ) 16 | momentum_config = dict( 17 | policy='cyclic', 18 | target_ratio=(0.85 / 0.95, 1), 19 | cyclic_times=1, 20 | step_ratio_up=0.4, 21 | ) 22 | 23 | # runtime settings 24 | runner = dict(type='EpochBasedRunner', max_epochs=20) 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/mmdet_schedule_1x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) 3 | optimizer_config = dict(grad_clip=None) 4 | # learning policy 5 | lr_config = dict( 6 | policy='step', 7 | warmup='linear', 8 | warmup_iters=500, 9 | warmup_ratio=0.001, 10 | step=[8, 11]) 11 | runner = dict(type='EpochBasedRunner', max_epochs=12) 12 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/schedule_2x.py: 
-------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used by models on nuScenes dataset 3 | optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01) 4 | # max_norm=10 is better for SECOND 5 | optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 6 | lr_config = dict( 7 | policy='step', 8 | warmup='linear', 9 | warmup_iters=1000, 10 | warmup_ratio=1.0 / 1000, 11 | step=[20, 23]) 12 | momentum_config = None 13 | # runtime settings 14 | runner = dict(type='EpochBasedRunner', max_epochs=24) 15 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/schedule_3x.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used by models on indoor dataset, 3 | # e.g., VoteNet on SUNRGBD and ScanNet 4 | lr = 0.008 # max learning rate 5 | optimizer = dict(type='AdamW', lr=lr, weight_decay=0.01) 6 | optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) 7 | lr_config = dict(policy='step', warmup=None, step=[24, 32]) 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=36) 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/_base_/schedules/seg_cosine_200e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used on ScanNet dataset in segmentation task 3 | optimizer = dict(type='Adam', lr=0.001, weight_decay=0.01) 4 | optimizer_config = dict(grad_clip=None) 5 | lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) 6 | momentum_config = None 7 | 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=200) 10 | -------------------------------------------------------------------------------- 
/mmdetection3d/mmdetection3d/configs/_base_/schedules/seg_cosine_50e.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | # This schedule is mainly used on S3DIS dataset in segmentation task 3 | optimizer = dict(type='Adam', lr=0.001, weight_decay=0.001) 4 | optimizer_config = dict(grad_clip=None) 5 | lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) 6 | momentum_config = None 7 | 8 | # runtime settings 9 | runner = dict(type='EpochBasedRunner', max_epochs=50) 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_0075voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_0075voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3))) 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_0075voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_0075voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | 
pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3)), 16 | test_cfg=dict(pts=dict(nms_type='circle'))) 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_01voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_01voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_01voxel_second_secfpn_dcn_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_01voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3))) 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_01voxel_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_01voxel_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3)), 16 | 
test_cfg=dict(pts=dict(nms_type='circle'))) 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_02pillar_second_secfpn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_02pillar_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle'))) 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_02pillar_second_secfpn_dcn_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_02pillar_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3))) 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/centerpoint/centerpoint_02pillar_second_secfpn_dcn_circlenms_4x8_cyclic_20e_nus.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./centerpoint_02pillar_second_secfpn_4x8_cyclic_20e_nus.py'] 2 | 3 | model = dict( 4 | pts_bbox_head=dict( 5 | separate_head=dict( 6 | type='DCNSeparateHead', 7 | dcn_config=dict( 8 | type='DCN', 9 | in_channels=64, 10 | out_channels=64, 11 | kernel_size=3, 12 | padding=1, 13 | groups=4), 14 | init_bias=-2.19, 15 | final_kernel=3)), 16 | test_cfg=dict(pts=dict(nms_type='circle'))) 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/dynamic_voxelization/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py: 
-------------------------------------------------------------------------------- 1 | _base_ = '../pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py' 2 | 3 | voxel_size = [0.16, 0.16, 4] 4 | point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] 5 | 6 | model = dict( 7 | type='DynamicVoxelNet', 8 | voxel_layer=dict( 9 | max_num_points=-1, 10 | point_cloud_range=point_cloud_range, 11 | voxel_size=voxel_size, 12 | max_voxels=(-1, -1)), 13 | voxel_encoder=dict( 14 | type='DynamicPillarFeatureNet', 15 | in_channels=4, 16 | feat_channels=[64], 17 | with_distance=False, 18 | voxel_size=voxel_size, 19 | point_cloud_range=point_cloud_range)) 20 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_second_secfpn_kitti.py', 3 | '../_base_/datasets/kitti-3d-3class.py', '../_base_/schedules/cosine.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | 7 | point_cloud_range = [0, -40, -3, 70.4, 40, 1] 8 | voxel_size = [0.05, 0.05, 0.1] 9 | 10 | model = dict( 11 | type='DynamicVoxelNet', 12 | voxel_layer=dict( 13 | _delete_=True, 14 | max_num_points=-1, 15 | point_cloud_range=point_cloud_range, 16 | voxel_size=voxel_size, 17 | max_voxels=(-1, -1)), 18 | voxel_encoder=dict( 19 | _delete_=True, 20 | type='DynamicSimpleVFE', 21 | voxel_size=voxel_size, 22 | point_cloud_range=point_cloud_range)) 23 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/dynamic_voxelization/dv_second_secfpn_6x8_80e_kitti-3d-car.py: -------------------------------------------------------------------------------- 1 | _base_ = '../second/hv_second_secfpn_6x8_80e_kitti-3d-car.py' 2 | 3 | point_cloud_range = [0, -40, -3, 70.4, 40, 1] 4 | voxel_size = [0.05, 0.05, 0.1] 5 | 6 | model = 
dict( 7 | type='DynamicVoxelNet', 8 | voxel_layer=dict( 9 | _delete_=True, 10 | max_num_points=-1, 11 | point_cloud_range=point_cloud_range, 12 | voxel_size=voxel_size, 13 | max_voxels=(-1, -1)), 14 | voxel_encoder=dict( 15 | _delete_=True, 16 | type='DynamicSimpleVFE', 17 | voxel_size=voxel_size, 18 | point_cloud_range=point_cloud_range)) 19 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d_finetune.py: -------------------------------------------------------------------------------- 1 | _base_ = './fcos3d_r101_caffe_fpn_gn-head_dcn_2x8_1x_nus-mono3d.py' 2 | # model settings 3 | model = dict( 4 | train_cfg=dict( 5 | code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05])) 6 | # optimizer 7 | optimizer = dict(lr=0.001) 8 | load_from = 'work_dirs/fcos3d_nus/latest.pth' 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fp16/hv_pointpillars_fpn_sbn-all_fp16_2x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = '../pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py' 2 | data = dict(samples_per_gpu=2, workers_per_gpu=2) 3 | # fp16 settings, the loss scale is specifically tuned to avoid Nan 4 | fp16 = dict(loss_scale=32.) 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fp16/hv_pointpillars_regnet-400mf_fpn_sbn-all_fp16_2x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = '../regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d.py' 2 | data = dict(samples_per_gpu=2, workers_per_gpu=2) 3 | # fp16 settings, the loss scale is specifically tuned to avoid Nan 4 | fp16 = dict(loss_scale=32.) 
5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fp16/hv_pointpillars_secfpn_sbn-all_fp16_2x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = '../pointpillars/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py' 2 | data = dict(samples_per_gpu=2, workers_per_gpu=2) 3 | # fp16 settings, the loss scale is specifically tuned to avoid Nan 4 | fp16 = dict(loss_scale=32.) 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-3class.py: -------------------------------------------------------------------------------- 1 | _base_ = '../second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/fp16/hv_second_secfpn_fp16_6x8_80e_kitti-3d-car.py: -------------------------------------------------------------------------------- 1 | _base_ = '../second/hv_second_secfpn_6x8_80e_kitti-3d-car.py' 2 | # fp16 settings 3 | fp16 = dict(loss_scale=512.) 
4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/free_anchor/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py' 2 | 3 | model = dict( 4 | pretrained=dict(pts='open-mmlab://regnetx_1.6gf'), 5 | pts_backbone=dict( 6 | _delete_=True, 7 | type='NoStemRegNet', 8 | arch='regnetx_1.6gf', 9 | out_indices=(1, 2, 3), 10 | frozen_stages=-1, 11 | strides=(1, 2, 2, 2), 12 | base_channels=64, 13 | stem_channels=64, 14 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 15 | norm_eval=False, 16 | style='pytorch'), 17 | pts_neck=dict(in_channels=[168, 408, 912])) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/free_anchor/hv_pointpillars_regnet-3.2gf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py' 2 | 3 | model = dict( 4 | pretrained=dict(pts='open-mmlab://regnetx_3.2gf'), 5 | pts_backbone=dict( 6 | _delete_=True, 7 | type='NoStemRegNet', 8 | arch='regnetx_3.2gf', 9 | out_indices=(1, 2, 3), 10 | frozen_stages=-1, 11 | strides=(1, 2, 2, 2), 12 | base_channels=64, 13 | stem_channels=64, 14 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 15 | norm_eval=False, 16 | style='pytorch'), 17 | pts_neck=dict(in_channels=[192, 432, 1008])) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/free_anchor/hv_pointpillars_regnet-400mf_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_pointpillars_fpn_sbn-all_free-anchor_4x8_2x_nus-3d.py' 2 | 3 | 
model = dict( 4 | pretrained=dict(pts='open-mmlab://regnetx_400mf'), 5 | pts_backbone=dict( 6 | _delete_=True, 7 | type='NoStemRegNet', 8 | arch='regnetx_400mf', 9 | out_indices=(1, 2, 3), 10 | frozen_stages=-1, 11 | strides=(1, 2, 2, 2), 12 | base_channels=64, 13 | stem_channels=64, 14 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 15 | norm_eval=False, 16 | style='pytorch'), 17 | pts_neck=dict(in_channels=[64, 160, 384])) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/h3dnet/README.md: -------------------------------------------------------------------------------- 1 | # H3DNet: 3D Object Detection Using Hybrid Geometric Primitives 2 | 3 | ## Introduction 4 | 5 | 6 | 7 | We implement H3DNet and provide the result and checkpoints on ScanNet datasets. 8 | 9 | ``` 10 | @inproceedings{zhang2020h3dnet, 11 | author = {Zhang, Zaiwei and Sun, Bo and Yang, Haitao and Huang, Qixing}, 12 | title = {H3DNet: 3D Object Detection Using Hybrid Geometric Primitives}, 13 | booktitle = {Proceedings of the European Conference on Computer Vision}, 14 | year = {2020} 15 | } 16 | ``` 17 | 18 | ## Results 19 | 20 | ### ScanNet 21 | 22 | | Backbone | Lr schd | Mem (GB) | Inf time (fps) | AP@0.25 |AP@0.5| Download | 23 | | :---------: | :-----: | :------: | :------------: | :----: |:----: | :------: | 24 | | [MultiBackbone](./h3dnet_3x8_scannet-3d-18class.py) | 3x |7.9||66.43|48.01|[model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/h3dnet/h3dnet_scannet-3d-18class/h3dnet_scannet-3d-18class_20200830_000136-02e36246.pth) | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/h3dnet/h3dnet_scannet-3d-18class/h3dnet_scannet-3d-18class_20200830_000136.log.json) | 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/h3dnet/metafile.yml: 
-------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: H3DNet 3 | Metadata: 4 | Training Data: ScanNet 5 | Training Techniques: 6 | - AdamW 7 | Training Resources: 8x GeForce GTX 1080 Ti 8 | Architecture: 9 | Paper: https://arxiv.org/abs/2006.05682 10 | README: configs/h3dnet/README.md 11 | 12 | Models: 13 | - Name: h3dnet_3x8_scannet-3d-18class 14 | In Collection: H3DNet 15 | Config: configs/h3dnet/h3dnet_3x8_scannet-3d-18class.py 16 | Metadata: 17 | Training Memory (GB): 7.9 18 | Results: 19 | - Task: 3D Object Detection 20 | Dataset: ScanNet 21 | Metrics: 22 | AP@0.25: 66.43 23 | AP@0.5: 48.01 24 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/h3dnet/h3dnet_scannet-3d-18class/h3dnet_scannet-3d-18class_20200830_000136-02e36246.pth 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/imvotenet/metafile.yml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: ImVoteNet 3 | Metadata: 4 | Training Data: SUNRGBD 5 | Training Techniques: 6 | - AdamW 7 | Training Resources: 8x TITAN Xp 8 | Architecture: 9 | - Faster R-CNN 10 | - VoteNet 11 | - Feature Pyramid Network 12 | Paper: https://arxiv.org/abs/2001.10692 13 | README: configs/imvotenet/README.md 14 | 15 | Models: 16 | - Name: imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class 17 | In Collection: ImVoteNet 18 | Config: configs/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class.py 19 | Metadata: 20 | Training Memory (GB): 2.1 21 | Results: 22 | - Task: Object Detection 23 | Dataset: SUNRGBD-2D 24 | Metrics: 25 | AP@0.5: 62.70 26 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/imvotenet/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class/imvotenet_faster_rcnn_r50_fpn_2x4_sunrgbd-3d-10class_20210323_173222-cad62aeb.pth 27 | 28 | - Name: imvotenet_stage2_16x8_sunrgbd-3d-10class 29 
| In Collection: ImVoteNet 30 | Config: configs/imvotenet/imvotenet_stage2_16x8_sunrgbd-3d-10class.py 31 | Metadata: 32 | Training Memory (GB): 9.4 33 | Results: 34 | - Task: 3D Object Detection 35 | Dataset: SUNRGBD-3D 36 | Metrics: 37 | AP@0.25: 64.04 38 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/imvotenet/imvotenet_stage2_16x8_sunrgbd-3d-10class/imvotenet_stage2_16x8_sunrgbd-3d-10class_20210323_184021-d44dcb66.pth 39 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/mvxnet/README.md: -------------------------------------------------------------------------------- 1 | # MVX-Net: Multimodal VoxelNet for 3D Object Detection 2 | 3 | ## Introduction 4 | 5 | 6 | 7 | We implement MVX-Net and provide its results and models on KITTI dataset. 8 | 9 | ``` 10 | @inproceedings{sindagi2019mvx, 11 | title={MVX-Net: Multimodal voxelnet for 3D object detection}, 12 | author={Sindagi, Vishwanath A and Zhou, Yin and Tuzel, Oncel}, 13 | booktitle={2019 International Conference on Robotics and Automation (ICRA)}, 14 | pages={7276--7282}, 15 | year={2019}, 16 | organization={IEEE} 17 | } 18 | 19 | ``` 20 | 21 | ## Results 22 | 23 | ### KITTI 24 | 25 | | Backbone |Class| Lr schd | Mem (GB) | Inf time (fps) | mAP | Download | 26 | | :---------: | :-----: | :------: | :------------: | :----: |:----: | :------: | 27 | | [SECFPN](./dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py)|3 Class|cosine 80e|6.7||63.0|[model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20200621_003904-10140f2d.pth) | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20200621_003904.log.json)| 28 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/mvxnet/metafile.yml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: MVX-Net 3 | Metadata: 4 | Training Data: KITTI 5 | Training Techniques: 6 | - AdamW 7 | Training Resources: 8x V100 GPUs 8 | Architecture: 9 | - Feature Pyramid Network 10 | - Dynamic Voxelization 11 | Paper: https://arxiv.org/abs/1904.01649 12 | README: configs/mvxnet/README.md 13 | 14 | Models: 15 | - Name: dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class 16 | In Collection: MVX-Net 17 | Config: configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py 18 | Metadata: 19 | Training Memory (GB): 6.7 20 | Results: 21 | - Task: 3D Object Detection 22 | Dataset: KITTI 23 | Metrics: 24 | mAP: 63.0 25 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class_20200621_003904-10140f2d.pth 26 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/cascade_mask_rcnn_r101_fpn_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/cascade_mask_rcnn_r50_fpn_coco-20e_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' 2 | 3 | load_from = 
'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' 2 | 3 | # learning policy 4 | lr_config = dict(step=[16, 19]) 5 | runner = dict(max_epochs=20) 6 | 7 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/cascade_mask_rcnn_x101_32x4d_fpn_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './cascade_mask_rcnn_r50_fpn_1x_nuim.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/htc_r50_fpn_coco-20e_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_nuim.py' 2 | 3 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth' # noqa 4 | -------------------------------------------------------------------------------- 
/mmdetection3d/mmdetection3d/configs/nuimages/htc_r50_fpn_coco-20e_20e_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_coco-20e_1x_nuim.py' 2 | # learning policy 3 | lr_config = dict(step=[16, 19]) 4 | runner = dict(max_epochs=20) 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e_16x1_20e_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './htc_r50_fpn_1x_nuim.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_64x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=64, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | norm_eval=True, 14 | style='pytorch', 15 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), 16 | stage_with_dcn=(False, True, True, True))) 17 | 18 | data = dict(samples_per_gpu=1, workers_per_gpu=1) 19 | # learning policy 20 | lr_config = dict(step=[16, 19]) 21 | runner = dict(max_epochs=20) 22 | 23 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth' # noqa 24 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/mask_rcnn_r101_fpn_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_1x_nuim.py' 2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) 3 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/mask_rcnn_r50_fpn_1x_nuim.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/nuim_instance.py', 4 | '../_base_/schedules/mmdet_schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | roi_head=dict( 8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/mask_rcnn_r50_fpn_coco-2x_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/nuim_instance.py', 4 | '../_base_/schedules/mmdet_schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | roi_head=dict( 8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) 9 | load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth' # noqa 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/mask_rcnn_r50_fpn_coco-2x_1x_nus-2d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/mask_rcnn_r50_fpn.py', 3 | '../_base_/datasets/nuim_instance.py', 4 | '../_base_/schedules/mmdet_schedule_1x.py', '../_base_/default_runtime.py' 5 | ] 6 | model = dict( 7 | roi_head=dict( 8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10))) 9 | 10 | file_client_args = dict( 11 | backend='petrel', 12 | path_mapping=dict({ 13 | './data/nuscenes/': 's3://nuscenes/nuscenes/', 14 | 'data/nuscenes/': 's3://nuscenes/nuscenes/' 15 | })) 16 | img_norm_cfg = dict( 17 | mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 18 | 19 | test_pipeline = [ 20 | 
dict(type='LoadImageFromFile', file_client_args=file_client_args), 21 | dict( 22 | type='MultiScaleFlipAug', 23 | img_scale=(1600, 900), 24 | flip=False, 25 | transforms=[ 26 | dict(type='Resize', keep_ratio=True), 27 | dict(type='RandomFlip'), 28 | dict(type='Normalize', **img_norm_cfg), 29 | dict(type='Pad', size_divisor=32), 30 | dict(type='ImageToTensor', keys=['img']), 31 | dict(type='Collect', keys=['img']), 32 | ]) 33 | ] 34 | data_root = 'data/nuimages/' 35 | # data = dict( 36 | # val=dict( 37 | # ann_file=data_root + 'annotations/nuimages_v1.0-mini.json'), 38 | # test=dict( 39 | # ann_file=data_root + 'annotations/nuimages_v1.0-mini.json')) 40 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/nuimages/mask_rcnn_x101_32x4d_fpn_1x_nuim.py: -------------------------------------------------------------------------------- 1 | _base_ = './mask_rcnn_r50_fpn_1x_nuim.py' 2 | model = dict( 3 | pretrained='open-mmlab://resnext101_32x4d', 4 | backbone=dict( 5 | type='ResNeXt', 6 | depth=101, 7 | groups=32, 8 | base_width=4, 9 | num_stages=4, 10 | out_indices=(0, 1, 2, 3), 11 | frozen_stages=1, 12 | norm_cfg=dict(type='BN', requires_grad=True), 13 | style='pytorch')) 14 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/parta2/metafile.yml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: Part-A^2 3 | Metadata: 4 | Training Data: KITTI 5 | Training Techniques: 6 | - AdamW 7 | Training Resources: 8x V100 GPUs 8 | Architecture: 9 | - Sparse U-Net 10 | Paper: https://arxiv.org/abs/1907.03670 11 | README: configs/parta2/README.md 12 | 13 | Models: 14 | - Name: hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class 15 | In Collection: Part-A^2 16 | Config: configs/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py 17 | Metadata: 18 | Training Memory (GB): 4.1 19 | Results: 20 | 
- Task: 3D Object Detection 21 | Dataset: KITTI 22 | Metrics: 23 | mAP: 67.9 24 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class_20200620_230724-a2672098.pth 25 | 26 | - Name: hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car 27 | In Collection: Part-A^2 28 | Config: configs/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car.py 29 | Metadata: 30 | Training Memory (GB): 4.0 31 | Results: 32 | - Task: 3D Object Detection 33 | Dataset: KITTI 34 | Metrics: 35 | mAP: 79.16 36 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/parta2/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-car_20200620_230755-f2a38b9a.pth 37 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointnet2/pointnet2_msg_16x2_cosine_250e_scannet_seg-3d-20class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/datasets/scannet_seg-3d-20class.py', 3 | '../_base_/models/pointnet2_msg.py', 4 | '../_base_/schedules/seg_cosine_200e.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | # data settings 8 | data = dict(samples_per_gpu=16) 9 | evaluation = dict(interval=5) 10 | 11 | # model settings 12 | model = dict( 13 | decode_head=dict( 14 | num_classes=20, 15 | ignore_index=20, 16 | # `class_weight` is generated in data pre-processing, saved in 17 | # `data/scannet/seg_info/train_label_weight.npy` 18 | # you can copy paste the values here, or input the file path as 19 | # `class_weight=data/scannet/seg_info/train_label_weight.npy` 20 | loss_decode=dict(class_weight=[ 21 | 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, 22 | 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, 23 | 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, 24 | 5.3954206, 4.6971426 25 | ])), 26 | 
test_cfg=dict( 27 | num_points=8192, 28 | block_size=1.5, 29 | sample_rate=0.5, 30 | use_normalized_coord=False, 31 | batch_size=24)) 32 | 33 | # runtime settings 34 | checkpoint_config = dict(interval=5) 35 | # PointNet2-MSG needs longer training time than PointNet2-SSG 36 | runner = dict(type='EpochBasedRunner', max_epochs=250) 37 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointnet2/pointnet2_msg_16x2_cosine_80e_s3dis_seg-3d-13class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/datasets/s3dis_seg-3d-13class.py', 3 | '../_base_/models/pointnet2_msg.py', 4 | '../_base_/schedules/seg_cosine_50e.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | # data settings 8 | data = dict(samples_per_gpu=16) 9 | evaluation = dict(interval=2) 10 | 11 | # model settings 12 | model = dict( 13 | backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz] 14 | decode_head=dict( 15 | num_classes=13, ignore_index=13, 16 | loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight 17 | test_cfg=dict( 18 | num_points=4096, 19 | block_size=1.0, 20 | sample_rate=0.5, 21 | use_normalized_coord=True, 22 | batch_size=24)) 23 | 24 | # runtime settings 25 | checkpoint_config = dict(interval=2) 26 | # PointNet2-MSG needs longer training time than PointNet2-SSG 27 | runner = dict(type='EpochBasedRunner', max_epochs=80) 28 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointnet2/pointnet2_ssg_16x2_cosine_200e_scannet_seg-3d-20class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/datasets/scannet_seg-3d-20class.py', 3 | '../_base_/models/pointnet2_ssg.py', 4 | '../_base_/schedules/seg_cosine_200e.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | # data settings 8 | data = dict(samples_per_gpu=16) 9 | 
evaluation = dict(interval=5) 10 | 11 | # model settings 12 | model = dict( 13 | decode_head=dict( 14 | num_classes=20, 15 | ignore_index=20, 16 | # `class_weight` is generated in data pre-processing, saved in 17 | # `data/scannet/seg_info/train_label_weight.npy` 18 | # you can copy paste the values here, or input the file path as 19 | # `class_weight=data/scannet/seg_info/train_label_weight.npy` 20 | loss_decode=dict(class_weight=[ 21 | 2.389689, 2.7215734, 4.5944676, 4.8543367, 4.096086, 4.907941, 22 | 4.690836, 4.512031, 4.623311, 4.9242644, 5.358117, 5.360071, 23 | 5.019636, 4.967126, 5.3502126, 5.4023647, 5.4027233, 5.4169416, 24 | 5.3954206, 4.6971426 25 | ])), 26 | test_cfg=dict( 27 | num_points=8192, 28 | block_size=1.5, 29 | sample_rate=0.5, 30 | use_normalized_coord=False, 31 | batch_size=24)) 32 | 33 | # runtime settings 34 | checkpoint_config = dict(interval=5) 35 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointnet2/pointnet2_ssg_16x2_cosine_50e_s3dis_seg-3d-13class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/datasets/s3dis_seg-3d-13class.py', 3 | '../_base_/models/pointnet2_ssg.py', 4 | '../_base_/schedules/seg_cosine_50e.py', '../_base_/default_runtime.py' 5 | ] 6 | 7 | # data settings 8 | data = dict(samples_per_gpu=16) 9 | evaluation = dict(interval=2) 10 | 11 | # model settings 12 | model = dict( 13 | backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz] 14 | decode_head=dict( 15 | num_classes=13, ignore_index=13, 16 | loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight 17 | test_cfg=dict( 18 | num_points=4096, 19 | block_size=1.0, 20 | sample_rate=0.5, 21 | use_normalized_coord=True, 22 | batch_size=24)) 23 | 24 | # runtime settings 25 | checkpoint_config = dict(interval=2) 26 | -------------------------------------------------------------------------------- 
/mmdetection3d/mmdetection3d/configs/pointpillars/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/configs/pointpillars/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_2x8_2x_lyft-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_lyft.py', 3 | '../_base_/datasets/lyft-3d.py', '../_base_/schedules/schedule_2x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_nus.py', 3 | '../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule_2x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_baseline.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_nus_basic.py', 3 | '../_base_/datasets/nus-3d_basic.py', '../_base_/schedules/schedule_2x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | evaluation = dict(interval=24) 7 | find_unused_parameters = True 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_det.py: -------------------------------------------------------------------------------- 1 | _base_ 
= [ 2 | '../_base_/models/hv_pointpillars_fpn_nus_basic.py', 3 | '../_base_/datasets/nus-3d_basic.py', '../_base_/schedules/schedule_2x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | evaluation = dict(interval=2) 7 | find_unused_parameters = True 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_flow.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_nus_flow.py', 3 | '../_base_/datasets/nus-3d_flow.py', '../_base_/schedules/schedule_2x.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | find_unused_parameters = True -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_fpn_sbn-all_range100_2x8_2x_lyft-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_range100_lyft.py', 3 | '../_base_/datasets/range100_lyft-3d.py', 4 | '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-3class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_secfpn_waymo.py', 3 | '../_base_/datasets/waymoD5-3d-3class.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | 8 | # data settings 9 | data = dict(train=dict(dataset=dict(load_interval=1))) 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymo-3d-car.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_secfpn_waymo.py', 3 | '../_base_/datasets/waymoD5-3d-car.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | 8 | # data settings 9 | data = dict(train=dict(dataset=dict(load_interval=1))) 10 | 11 | # model settings 12 | model = dict( 13 | type='MVXFasterRCNN', 14 | pts_bbox_head=dict( 15 | type='Anchor3DHead', 16 | num_classes=1, 17 | anchor_generator=dict( 18 | type='AlignedAnchor3DRangeGenerator', 19 | ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345]], 20 | sizes=[[2.08, 4.73, 1.77]], 21 | rotations=[0, 1.57], 22 | reshape_out=True)), 23 | # model training and testing settings 24 | train_cfg=dict( 25 | _delete_=True, 26 | pts=dict( 27 | assigner=dict( 28 | type='MaxIoUAssigner', 29 | iou_calculator=dict(type='BboxOverlapsNearest3D'), 30 | pos_iou_thr=0.55, 31 | neg_iou_thr=0.4, 32 | min_pos_iou=0.4, 33 | ignore_iof_thr=-1), 34 | allowed_border=0, 35 | code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 36 | pos_weight=-1, 37 | debug=False))) 38 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_secfpn_waymo.py', 3 | '../_base_/datasets/waymoD5-3d-3class.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-car.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_secfpn_waymo.py', 3 | '../_base_/datasets/waymoD5-3d-car.py', 
4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | 8 | # model settings 9 | model = dict( 10 | type='MVXFasterRCNN', 11 | pts_bbox_head=dict( 12 | type='Anchor3DHead', 13 | num_classes=1, 14 | anchor_generator=dict( 15 | type='AlignedAnchor3DRangeGenerator', 16 | ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345]], 17 | sizes=[[2.08, 4.73, 1.77]], 18 | rotations=[0, 1.57], 19 | reshape_out=True)), 20 | # model training and testing settings 21 | train_cfg=dict( 22 | _delete_=True, 23 | pts=dict( 24 | assigner=dict( 25 | type='MaxIoUAssigner', 26 | iou_calculator=dict(type='BboxOverlapsNearest3D'), 27 | pos_iou_thr=0.55, 28 | neg_iou_thr=0.4, 29 | min_pos_iou=0.4, 30 | ignore_iof_thr=-1), 31 | allowed_border=0, 32 | code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 33 | pos_weight=-1, 34 | debug=False))) 35 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/regnet/hv_pointpillars_regnet-1.6gf_fpn_sbn-all_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_nus.py', 3 | '../_base_/datasets/nus-3d.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | # model settings 8 | model = dict( 9 | type='MVXFasterRCNN', 10 | pretrained=dict(pts='open-mmlab://regnetx_1.6gf'), 11 | pts_backbone=dict( 12 | _delete_=True, 13 | type='NoStemRegNet', 14 | arch='regnetx_1.6gf', 15 | out_indices=(1, 2, 3), 16 | frozen_stages=-1, 17 | strides=(1, 2, 2, 2), 18 | base_channels=64, 19 | stem_channels=64, 20 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 21 | norm_eval=False, 22 | style='pytorch'), 23 | pts_neck=dict(in_channels=[168, 408, 912])) 24 | -------------------------------------------------------------------------------- 
/mmdetection3d/mmdetection3d/configs/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_2x8_2x_lyft-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_lyft.py', 3 | '../_base_/datasets/lyft-3d.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | # model settings 8 | model = dict( 9 | type='MVXFasterRCNN', 10 | pretrained=dict(pts='open-mmlab://regnetx_400mf'), 11 | pts_backbone=dict( 12 | _delete_=True, 13 | type='NoStemRegNet', 14 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 15 | out_indices=(1, 2, 3), 16 | frozen_stages=-1, 17 | strides=(1, 2, 2, 2), 18 | base_channels=64, 19 | stem_channels=64, 20 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 21 | norm_eval=False, 22 | style='pytorch'), 23 | pts_neck=dict(in_channels=[64, 160, 384])) 24 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_4x8_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_nus.py', 3 | '../_base_/datasets/nus-3d.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | # model settings 8 | model = dict( 9 | type='MVXFasterRCNN', 10 | pretrained=dict(pts='open-mmlab://regnetx_400mf'), 11 | pts_backbone=dict( 12 | _delete_=True, 13 | type='NoStemRegNet', 14 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 15 | out_indices=(1, 2, 3), 16 | frozen_stages=-1, 17 | strides=(1, 2, 2, 2), 18 | base_channels=64, 19 | stem_channels=64, 20 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 21 | norm_eval=False, 22 | style='pytorch'), 23 | pts_neck=dict(in_channels=[64, 160, 384])) 24 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/regnet/hv_pointpillars_regnet-400mf_fpn_sbn-all_range100_2x8_2x_lyft-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_pointpillars_fpn_range100_lyft.py', 3 | '../_base_/datasets/range100_lyft-3d.py', 4 | '../_base_/schedules/schedule_2x.py', 5 | '../_base_/default_runtime.py', 6 | ] 7 | # model settings 8 | model = dict( 9 | type='MVXFasterRCNN', 10 | pretrained=dict(pts='open-mmlab://regnetx_400mf'), 11 | pts_backbone=dict( 12 | _delete_=True, 13 | type='NoStemRegNet', 14 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 15 | out_indices=(1, 2, 3), 16 | frozen_stages=-1, 17 | strides=(1, 2, 2, 2), 18 | base_channels=64, 19 | stem_channels=64, 20 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 21 | norm_eval=False, 22 | style='pytorch'), 23 | pts_neck=dict(in_channels=[64, 160, 384])) 24 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_second_secfpn_kitti.py', 3 | '../_base_/datasets/kitti-3d-3class.py', 4 | '../_base_/schedules/cyclic_40e.py', '../_base_/default_runtime.py' 5 | ] 6 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/models/hv_second_secfpn_kitti.py', 3 | '../_base_/datasets/kitti-3d-car.py', '../_base_/schedules/cyclic_40e.py', 4 | '../_base_/default_runtime.py' 5 | ] 6 | point_cloud_range = [0, -40, -3, 70.4, 40, 1] 7 | model = dict( 8 | 
bbox_head=dict( 9 | type='Anchor3DHead', 10 | num_classes=1, 11 | anchor_generator=dict( 12 | _delete_=True, 13 | type='Anchor3DRangeGenerator', 14 | ranges=[[0, -40.0, -1.78, 70.4, 40.0, -1.78]], 15 | sizes=[[1.6, 3.9, 1.56]], 16 | rotations=[0, 1.57], 17 | reshape_out=True)), 18 | # model training and testing settings 19 | train_cfg=dict( 20 | _delete_=True, 21 | assigner=dict( 22 | type='MaxIoUAssigner', 23 | iou_calculator=dict(type='BboxOverlapsNearest3D'), 24 | pos_iou_thr=0.6, 25 | neg_iou_thr=0.45, 26 | min_pos_iou=0.45, 27 | ignore_iof_thr=-1), 28 | allowed_border=0, 29 | pos_weight=-1, 30 | debug=False)) 31 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_1x16_2x_lyft-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_ssn_secfpn_sbn-all_2x16_2x_lyft-3d.py' 2 | # model settings 3 | model = dict( 4 | type='MVXFasterRCNN', 5 | pretrained=dict(pts='open-mmlab://regnetx_400mf'), 6 | pts_backbone=dict( 7 | _delete_=True, 8 | type='NoStemRegNet', 9 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 10 | out_indices=(1, 2, 3), 11 | frozen_stages=-1, 12 | strides=(1, 2, 2, 2), 13 | base_channels=64, 14 | stem_channels=64, 15 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 16 | norm_eval=False, 17 | style='pytorch'), 18 | pts_neck=dict(in_channels=[64, 160, 384])) 19 | # dataset settings 20 | data = dict(samples_per_gpu=1, workers_per_gpu=2) 21 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/ssn/hv_ssn_regnet-400mf_secfpn_sbn-all_2x16_2x_nus-3d.py: -------------------------------------------------------------------------------- 1 | _base_ = './hv_ssn_secfpn_sbn-all_2x16_2x_nus-3d.py' 2 | # model settings 3 | model = dict( 4 | type='MVXFasterRCNN', 5 | 
pretrained=dict(pts='open-mmlab://regnetx_400mf'), 6 | pts_backbone=dict( 7 | _delete_=True, 8 | type='NoStemRegNet', 9 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 10 | out_indices=(1, 2, 3), 11 | frozen_stages=-1, 12 | strides=(1, 2, 2, 2), 13 | base_channels=64, 14 | stem_channels=64, 15 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), 16 | norm_eval=False, 17 | style='pytorch'), 18 | pts_neck=dict(in_channels=[64, 160, 384])) 19 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/votenet/votenet_16x8_sunrgbd-3d-10class.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../_base_/datasets/sunrgbd-3d-10class.py', '../_base_/models/votenet.py', 3 | '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py' 4 | ] 5 | # model settings 6 | model = dict( 7 | bbox_head=dict( 8 | num_classes=10, 9 | bbox_coder=dict( 10 | type='PartialBinBasedBBoxCoder', 11 | num_sizes=10, 12 | num_dir_bins=12, 13 | with_rot=True, 14 | mean_sizes=[ 15 | [2.114256, 1.620300, 0.927272], [0.791118, 1.279516, 0.718182], 16 | [0.923508, 1.867419, 0.845495], [0.591958, 0.552978, 0.827272], 17 | [0.699104, 0.454178, 0.75625], [0.69519, 1.346299, 0.736364], 18 | [0.528526, 1.002642, 1.172878], [0.500618, 0.632163, 0.683424], 19 | [0.404671, 1.071108, 1.688889], [0.76584, 1.398258, 0.472728] 20 | ]), 21 | )) 22 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/configs/votenet/votenet_iouloss_8x8_scannet-3d-18class.py: -------------------------------------------------------------------------------- 1 | _base_ = ['./votenet_8x8_scannet-3d-18class.py'] 2 | 3 | # model settings, add iou loss 4 | model = dict( 5 | bbox_head=dict( 6 | iou_loss=dict( 7 | type='AxisAlignedIoULoss', reduction='sum', loss_weight=10.0 / 8 | 3.0))) 9 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/__init__.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from .version import __version__, short_version 4 | 5 | 6 | def digit_version(version_str): 7 | digit_version = [] 8 | for x in version_str.split('.'): 9 | if x.isdigit(): 10 | digit_version.append(int(x)) 11 | elif x.find('rc') != -1: 12 | patch_version = x.split('rc') 13 | digit_version.append(int(patch_version[0]) - 1) 14 | digit_version.append(int(patch_version[1])) 15 | return digit_version 16 | 17 | 18 | mmcv_minimum_version = '1.2.4' 19 | mmcv_maximum_version = '1.4.0' 20 | mmcv_version = digit_version(mmcv.__version__) 21 | 22 | 23 | assert (mmcv_version >= digit_version(mmcv_minimum_version) 24 | and mmcv_version <= digit_version(mmcv_maximum_version)), \ 25 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 26 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' 
27 | 28 | __all__ = ['__version__', 'short_version'] 29 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/apis/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/apis/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import (async_inference_detector, inference_detector, 2 | init_detector, show_result_pyplot) 3 | from .test import multi_gpu_test, single_gpu_test 4 | from .train import get_root_logger, set_random_seed, train_detector 5 | 6 | __all__ = [ 7 | 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 8 | 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 9 | 'multi_gpu_test', 'single_gpu_test' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/core/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor import * # noqa: F401, F403 2 | from .bbox import * # noqa: F401, F403 3 | from .evaluation import * # noqa: F401, F403 4 | from .export import * # noqa: F401, F403 5 | from .mask import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: 
F401, F403 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, 2 | YOLOAnchorGenerator) 3 | from .builder import ANCHOR_GENERATORS, build_anchor_generator 4 | from .point_generator import PointGenerator 5 | from .utils import anchor_inside_flags, calc_region, images_to_levels 6 | 7 | __all__ = [ 8 | 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 9 | 'PointGenerator', 'images_to_levels', 'calc_region', 10 | 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | ANCHOR_GENERATORS = Registry('Anchor generator') 4 | 5 | 6 | def build_anchor_generator(cfg, default_args=None): 7 | return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args) 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/anchor/point_generator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .builder import ANCHOR_GENERATORS 4 | 5 | 6 | @ANCHOR_GENERATORS.register_module() 7 | class PointGenerator(object): 8 | 9 | def _meshgrid(self, x, y, row_major=True): 10 | xx = x.repeat(len(y)) 11 | yy = y.view(-1, 1).repeat(1, len(x)).view(-1) 12 | if row_major: 13 | return xx, yy 14 | else: 15 | return yy, xx 16 | 17 | def grid_points(self, featmap_size, stride=16, device='cuda'): 18 | feat_h, feat_w = featmap_size 19 | shift_x = torch.arange(0., feat_w, device=device) * stride 20 | shift_y = torch.arange(0., 
feat_h, device=device) * stride 21 | shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) 22 | stride = shift_x.new_full((shift_xx.shape[0], ), stride) 23 | shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) 24 | all_points = shifts.to(device) 25 | return all_points 26 | 27 | def valid_flags(self, featmap_size, valid_size, device='cuda'): 28 | feat_h, feat_w = featmap_size 29 | valid_h, valid_w = valid_size 30 | assert valid_h <= feat_h and valid_w <= feat_w 31 | valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) 32 | valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) 33 | valid_x[:valid_w] = 1 34 | valid_y[:valid_h] = 1 35 | valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) 36 | valid = valid_xx & valid_yy 37 | return valid 38 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from .approx_max_iou_assigner import ApproxMaxIoUAssigner 2 | from .assign_result import AssignResult 3 | from .atss_assigner import ATSSAssigner 4 | from .base_assigner import BaseAssigner 5 | from .center_region_assigner import CenterRegionAssigner 6 | from .grid_assigner import GridAssigner 7 | from .hungarian_assigner import HungarianAssigner 8 | from .max_iou_assigner import MaxIoUAssigner 9 | from .point_assigner import PointAssigner 10 | from .region_assigner import RegionAssigner 11 | 12 | __all__ = [ 13 | 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 14 | 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner', 15 | 'HungarianAssigner', 'RegionAssigner' 16 | ] 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | from abc import 
ABCMeta, abstractmethod 2 | 3 | 4 | class BaseAssigner(metaclass=ABCMeta): 5 | """Base assigner that assigns boxes to ground truth boxes.""" 6 | 7 | @abstractmethod 8 | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 9 | """Assign boxes to either a ground truth boxes or a negative boxes.""" 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | BBOX_ASSIGNERS = Registry('bbox_assigner') 4 | BBOX_SAMPLERS = Registry('bbox_sampler') 5 | BBOX_CODERS = Registry('bbox_coder') 6 | 7 | 8 | def build_assigner(cfg, **default_args): 9 | """Builder of box assigner.""" 10 | return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) 11 | 12 | 13 | def build_sampler(cfg, **default_args): 14 | """Builder of box sampler.""" 15 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) 16 | 17 | 18 | def build_bbox_coder(cfg, **default_args): 19 | """Builder of box coder.""" 20 | return build_from_cfg(cfg, BBOX_CODERS, default_args) 21 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/coder/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_bbox_coder import BaseBBoxCoder 2 | from .bucketing_bbox_coder import BucketingBBoxCoder 3 | from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder 4 | from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder 5 | from .pseudo_bbox_coder import PseudoBBoxCoder 6 | from .tblr_bbox_coder import TBLRBBoxCoder 7 | from .yolo_bbox_coder import YOLOBBoxCoder 8 | 9 | __all__ = [ 10 | 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 11 | 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', 12 | 'BucketingBBoxCoder' 13 | ] 14 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/coder/base_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BaseBBoxCoder(metaclass=ABCMeta): 5 | """Base bounding box coder.""" 6 | 7 | def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def encode(self, bboxes, gt_bboxes): 12 | """Encode deltas between bboxes and ground truth boxes.""" 13 | 14 | @abstractmethod 15 | def decode(self, bboxes, bboxes_pred): 16 | """Decode the predicted bboxes according to prediction and base 17 | boxes.""" 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/coder/pseudo_bbox_coder.py: -------------------------------------------------------------------------------- 1 | from ..builder import BBOX_CODERS 2 | from .base_bbox_coder import BaseBBoxCoder 3 | 4 | 5 | @BBOX_CODERS.register_module() 6 | class PseudoBBoxCoder(BaseBBoxCoder): 7 | """Pseudo bounding box coder.""" 8 | 9 | def __init__(self, **kwargs): 10 | super(BaseBBoxCoder, self).__init__(**kwargs) 11 | 12 | def encode(self, bboxes, gt_bboxes): 13 | """torch.Tensor: return the given ``bboxes``""" 14 | return gt_bboxes 15 | 16 | def decode(self, bboxes, pred_bboxes): 17 | """torch.Tensor: return the given ``pred_bboxes``""" 18 | return pred_bboxes 19 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/demodata.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from mmdet.utils.util_random import ensure_rng 5 | 6 | 7 | def random_boxes(num=1, scale=1, rng=None): 8 | """Simple version of ``kwimage.Boxes.random`` 9 | 10 | Returns: 11 | Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
12 | 13 | References: 14 | https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 15 | 16 | Example: 17 | >>> num = 3 18 | >>> scale = 512 19 | >>> rng = 0 20 | >>> boxes = random_boxes(num, scale, rng) 21 | >>> print(boxes) 22 | tensor([[280.9925, 278.9802, 308.6148, 366.1769], 23 | [216.9113, 330.6978, 224.0446, 456.5878], 24 | [405.3632, 196.3221, 493.3953, 270.7942]]) 25 | """ 26 | rng = ensure_rng(rng) 27 | 28 | tlbr = rng.rand(num, 4).astype(np.float32) 29 | 30 | tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) 31 | tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) 32 | br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) 33 | br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) 34 | 35 | tlbr[:, 0] = tl_x * scale 36 | tlbr[:, 1] = tl_y * scale 37 | tlbr[:, 2] = br_x * scale 38 | tlbr[:, 3] = br_y * scale 39 | 40 | boxes = torch.from_numpy(tlbr) 41 | return boxes 42 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_iou_calculator 2 | from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps 3 | 4 | __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/iou_calculators/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | IOU_CALCULATORS = Registry('IoU calculator') 4 | 5 | 6 | def build_iou_calculator(cfg, default_args=None): 7 | """Builder of IoU calculator.""" 8 | return build_from_cfg(cfg, IOU_CALCULATORS, default_args) 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/match_costs/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .builder import build_match_cost 2 | from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost 3 | 4 | __all__ = [ 5 | 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 6 | 'FocalLossCost' 7 | ] 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/match_costs/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | MATCH_COST = Registry('Match Cost') 4 | 5 | 6 | def build_match_cost(cfg, default_args=None): 7 | """Builder of IoU calculator.""" 8 | return build_from_cfg(cfg, MATCH_COST, default_args) 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_sampler import BaseSampler 2 | from .combined_sampler import CombinedSampler 3 | from .instance_balanced_pos_sampler import InstanceBalancedPosSampler 4 | from .iou_balanced_neg_sampler import IoUBalancedNegSampler 5 | from .ohem_sampler import OHEMSampler 6 | from .pseudo_sampler import PseudoSampler 7 | from .random_sampler import RandomSampler 8 | from .sampling_result import SamplingResult 9 | from .score_hlr_sampler import ScoreHLRSampler 10 | 11 | __all__ = [ 12 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 13 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 14 | 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler' 15 | ] 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/bbox/samplers/combined_sampler.py: -------------------------------------------------------------------------------- 1 | from ..builder import 
BBOX_SAMPLERS, build_sampler 2 | from .base_sampler import BaseSampler 3 | 4 | 5 | @BBOX_SAMPLERS.register_module() 6 | class CombinedSampler(BaseSampler): 7 | """A sampler that combines positive sampler and negative sampler.""" 8 | 9 | def __init__(self, pos_sampler, neg_sampler, **kwargs): 10 | super(CombinedSampler, self).__init__(**kwargs) 11 | self.pos_sampler = build_sampler(pos_sampler, **kwargs) 12 | self.neg_sampler = build_sampler(neg_sampler, **kwargs) 13 | 14 | def _sample_pos(self, **kwargs): 15 | """Sample positive samples.""" 16 | raise NotImplementedError 17 | 18 | def _sample_neg(self, **kwargs): 19 | """Sample negative samples.""" 20 | raise NotImplementedError 21 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, 2 | get_classes, imagenet_det_classes, 3 | imagenet_vid_classes, voc_classes) 4 | from .eval_hooks import DistEvalHook, EvalHook 5 | from .mean_ap import average_precision, eval_map, print_map_summary 6 | from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, 7 | print_recall_summary) 8 | 9 | __all__ = [ 10 | 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 11 | 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 12 | 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 13 | 'print_map_summary', 'eval_recalls', 'print_recall_summary', 14 | 'plot_num_recall', 'plot_iou_recall' 15 | ] 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/export/__init__.py: -------------------------------------------------------------------------------- 1 | from .pytorch2onnx import (build_model_from_cfg, 2 | generate_inputs_and_wrap_model, 3 | preprocess_example_input) 4 | 5 | 
__all__ = [ 6 | 'build_model_from_cfg', 'generate_inputs_and_wrap_model', 7 | 'preprocess_example_input' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/mask/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_target import mask_target 2 | from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks 3 | from .utils import encode_mask_results, split_combined_polys 4 | 5 | __all__ = [ 6 | 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 7 | 'PolygonMasks', 'encode_mask_results' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from .bbox_nms import fast_nms, multiclass_nms 2 | from .merge_augs import (merge_aug_bboxes, merge_aug_masks, 3 | merge_aug_proposals, merge_aug_scores) 4 | 5 | __all__ = [ 6 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 7 | 'merge_aug_scores', 'merge_aug_masks', 'fast_nms' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dist_utils import DistOptimizerHook, allreduce_grads, reduce_mean 2 | from .misc import mask2ndarray, multi_apply, unmap 3 | 4 | __all__ = [ 5 | 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 6 | 'unmap', 'mask2ndarray' 7 | ] 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/core/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .image import (color_val_matplotlib, imshow_det_bboxes, 2 | 
imshow_gt_det_bboxes) 3 | 4 | __all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/datasets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/datasets/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 2 | from .cityscapes import CityscapesDataset 3 | from .coco import CocoDataset 4 | from .custom import CustomDataset 5 | from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, 6 | RepeatDataset) 7 | from .deepfashion import DeepFashionDataset 8 | from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset 9 | from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler 10 | from .utils import (NumClassCheckHook, get_loading_pipeline, 11 | replace_ImageToTensor) 12 | from .voc import VOCDataset 13 | from .wider_face import WIDERFaceDataset 14 | from .xml_style import XMLDataset 15 | 16 | __all__ = [ 17 | 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 18 | 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 19 | 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', 20 | 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 21 | 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 22 | 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', 23 | 'NumClassCheckHook' 24 | ] 25 | -------------------------------------------------------------------------------- 
/mmdetection3d/mmdetection3d/mmdet/datasets/deepfashion.py: -------------------------------------------------------------------------------- 1 | from .builder import DATASETS 2 | from .coco import CocoDataset 3 | 4 | 5 | @DATASETS.register_module() 6 | class DeepFashionDataset(CocoDataset): 7 | 8 | CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 9 | 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 10 | 'skin', 'face') 11 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, 2 | ContrastTransform, EqualizeTransform, Rotate, Shear, 3 | Translate) 4 | from .compose import Compose 5 | from .formating import (Collect, DefaultFormatBundle, ImageToTensor, 6 | ToDataContainer, ToTensor, Transpose, to_tensor) 7 | from .instaboost import InstaBoost 8 | from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, 9 | LoadMultiChannelImageFromFiles, LoadProposals) 10 | from .test_time_aug import MultiScaleFlipAug 11 | from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize, 12 | Pad, PhotoMetricDistortion, RandomCenterCropPad, 13 | RandomCrop, RandomFlip, Resize, SegRescale) 14 | 15 | __all__ = [ 16 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 17 | 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', 18 | 'LoadImageFromFile', 'LoadImageFromWebcam', 19 | 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', 20 | 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', 21 | 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', 22 | 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', 23 | 'Rotate', 'ColorTransform', 'EqualizeTransform', 
'BrightnessTransform', 24 | 'ContrastTransform', 'Translate' 25 | ] 26 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .distributed_sampler import DistributedSampler 2 | from .group_sampler import DistributedGroupSampler, GroupSampler 3 | 4 | __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/datasets/samplers/distributed_sampler.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | from torch.utils.data import DistributedSampler as _DistributedSampler 5 | 6 | 7 | class DistributedSampler(_DistributedSampler): 8 | 9 | def __init__(self, 10 | dataset, 11 | num_replicas=None, 12 | rank=None, 13 | shuffle=True, 14 | seed=0): 15 | super().__init__( 16 | dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) 17 | # for the compatibility from PyTorch 1.3+ 18 | self.seed = seed if seed is not None else 0 19 | 20 | def __iter__(self): 21 | # deterministically shuffle based on epoch 22 | if self.shuffle: 23 | g = torch.Generator() 24 | g.manual_seed(self.epoch + self.seed) 25 | indices = torch.randperm(len(self.dataset), generator=g).tolist() 26 | else: 27 | indices = torch.arange(len(self.dataset)).tolist() 28 | 29 | # add extra samples to make it evenly divisible 30 | # in case that indices is shorter than half of total_size 31 | indices = (indices * 32 | math.ceil(self.total_size / len(indices)))[:self.total_size] 33 | assert len(indices) == self.total_size 34 | 35 | # subsample 36 | indices = indices[self.rank:self.total_size:self.num_replicas] 37 | assert len(indices) == self.num_samples 38 | 39 | return iter(indices) 40 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/models/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, 3 | ROI_EXTRACTORS, SHARED_HEADS, build_backbone, 4 | build_detector, build_head, build_loss, build_neck, 5 | build_roi_extractor, build_shared_head) 6 | from .dense_heads import * # noqa: F401,F403 7 | from .detectors import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .roi_heads import * # noqa: F401,F403 11 | 12 | __all__ = [ 13 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 14 | 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 15 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector' 16 | ] 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .darknet import Darknet 2 | from .detectors_resnet import DetectoRS_ResNet 3 | from .detectors_resnext import DetectoRS_ResNeXt 4 | from .hourglass import HourglassNet 5 | from .hrnet import HRNet 6 | from .regnet import RegNet 7 | from .res2net import Res2Net 8 | from .resnest import ResNeSt 9 | from .resnet import ResNet, ResNetV1d 10 | from .resnext import ResNeXt 11 | from .ssd_vgg import SSDVGG 12 | from 
.trident_resnet import TridentResNet 13 | 14 | __all__ = [ 15 | 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', 16 | 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet', 17 | 'ResNeSt', 'TridentResNet' 18 | ] 19 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet/models/detectors/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/atss.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class ATSS(SingleStageDetector): 7 | """Implementation of `ATSS `_.""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/faster_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FasterRCNN(TwoStageDetector): 7 | """Implementation of `Faster R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(FasterRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | 
roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/fcos.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FCOS(SingleStageDetector): 7 | """Implementation of `FCOS `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/fovea.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FOVEA(SingleStageDetector): 7 | """Implementation of `FoveaBox `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/fsaf.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class FSAF(SingleStageDetector): 7 | """Implementation of `FSAF `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | 
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/gfl.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GFL(SingleStageDetector): 7 | 8 | def __init__(self, 9 | backbone, 10 | neck, 11 | bbox_head, 12 | train_cfg=None, 13 | test_cfg=None, 14 | pretrained=None): 15 | super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, 16 | test_cfg, pretrained) 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/grid_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class GridRCNN(TwoStageDetector): 7 | """Grid R-CNN. 
8 | 9 | This detector is the implementation of: 10 | - Grid R-CNN (https://arxiv.org/abs/1811.12030) 11 | - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) 12 | """ 13 | 14 | def __init__(self, 15 | backbone, 16 | rpn_head, 17 | roi_head, 18 | train_cfg, 19 | test_cfg, 20 | neck=None, 21 | pretrained=None): 22 | super(GridRCNN, self).__init__( 23 | backbone=backbone, 24 | neck=neck, 25 | rpn_head=rpn_head, 26 | roi_head=roi_head, 27 | train_cfg=train_cfg, 28 | test_cfg=test_cfg, 29 | pretrained=pretrained) 30 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/htc.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .cascade_rcnn import CascadeRCNN 3 | 4 | 5 | @DETECTORS.register_module() 6 | class HybridTaskCascade(CascadeRCNN): 7 | """Implementation of `HTC `_""" 8 | 9 | def __init__(self, **kwargs): 10 | super(HybridTaskCascade, self).__init__(**kwargs) 11 | 12 | @property 13 | def with_semantic(self): 14 | """bool: whether the detector has a semantic head""" 15 | return self.roi_head.with_semantic 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskRCNN(TwoStageDetector): 7 | """Implementation of `Mask R-CNN `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | rpn_head, 12 | roi_head, 13 | train_cfg, 14 | test_cfg, 15 | neck=None, 16 | pretrained=None): 17 | super(MaskRCNN, self).__init__( 18 | backbone=backbone, 19 | neck=neck, 20 | rpn_head=rpn_head, 21 | roi_head=roi_head, 22 | train_cfg=train_cfg, 23 | test_cfg=test_cfg, 24 | pretrained=pretrained) 25 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/mask_scoring_rcnn.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class MaskScoringRCNN(TwoStageDetector): 7 | """Mask Scoring RCNN. 8 | 9 | https://arxiv.org/abs/1903.00241 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | rpn_head, 15 | roi_head, 16 | train_cfg, 17 | test_cfg, 18 | neck=None, 19 | pretrained=None): 20 | super(MaskScoringRCNN, self).__init__( 21 | backbone=backbone, 22 | neck=neck, 23 | rpn_head=rpn_head, 24 | roi_head=roi_head, 25 | train_cfg=train_cfg, 26 | test_cfg=test_cfg, 27 | pretrained=pretrained) 28 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/nasfcos.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class NASFCOS(SingleStageDetector): 7 | """NAS-FCOS: Fast Neural Architecture Search for Object Detection. 
8 | 9 | https://arxiv.org/abs/1906.0442 10 | """ 11 | 12 | def __init__(self, 13 | backbone, 14 | neck, 15 | bbox_head, 16 | train_cfg=None, 17 | test_cfg=None, 18 | pretrained=None): 19 | super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, 20 | test_cfg, pretrained) 21 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/paa.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class PAA(SingleStageDetector): 7 | """Implementation of `PAA `_.""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/point_rend.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .two_stage import TwoStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class PointRend(TwoStageDetector): 7 | """PointRend: Image Segmentation as Rendering 8 | 9 | This detector is the implementation of 10 | `PointRend `_. 
11 | 12 | """ 13 | 14 | def __init__(self, 15 | backbone, 16 | rpn_head, 17 | roi_head, 18 | train_cfg, 19 | test_cfg, 20 | neck=None, 21 | pretrained=None): 22 | super(PointRend, self).__init__( 23 | backbone=backbone, 24 | neck=neck, 25 | rpn_head=rpn_head, 26 | roi_head=roi_head, 27 | train_cfg=train_cfg, 28 | test_cfg=test_cfg, 29 | pretrained=pretrained) 30 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/reppoints_detector.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RepPointsDetector(SingleStageDetector): 7 | """RepPoints: Point Set Representation for Object Detection. 8 | 9 | This detector is the implementation of: 10 | - RepPoints detector (https://arxiv.org/pdf/1904.11490) 11 | """ 12 | 13 | def __init__(self, 14 | backbone, 15 | neck, 16 | bbox_head, 17 | train_cfg=None, 18 | test_cfg=None, 19 | pretrained=None): 20 | super(RepPointsDetector, 21 | self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, 22 | pretrained) 23 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/detectors/retinanet.py: -------------------------------------------------------------------------------- 1 | from ..builder import DETECTORS 2 | from .single_stage import SingleStageDetector 3 | 4 | 5 | @DETECTORS.register_module() 6 | class RetinaNet(SingleStageDetector): 7 | """Implementation of `RetinaNet `_""" 8 | 9 | def __init__(self, 10 | backbone, 11 | neck, 12 | bbox_head, 13 | train_cfg=None, 14 | test_cfg=None, 15 | pretrained=None): 16 | super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, 17 | test_cfg, pretrained) 18 | -------------------------------------------------------------------------------- 
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
    """Implementation of `VarifocalNet
    (VFNet).`_

    All construction arguments are passed straight through to the
    single-stage base class.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None,
                 test_cfg=None, pretrained=None):
        super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
                                    test_cfg, pretrained)
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
    """Implementation of the YOLOv3 single-stage detector.

    A registration-only wrapper; detection logic is provided entirely by
    :class:`SingleStageDetector` and the configured bbox head.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None,
                 test_cfg=None, pretrained=None):
        super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
                                     test_cfg, pretrained)
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
    """RoI head for Double Head RCNN.

    https://arxiv.org/abs/1904.06493
    """

    def __init__(self, reg_roi_scale_factor, **kwargs):
        super(DoubleHeadRoIHead, self).__init__(**kwargs)
        # Scale factor applied to RoIs when extracting regression features.
        self.reg_roi_scale_factor = reg_roi_scale_factor

    def _bbox_forward(self, x, rois):
        """Box head forward function used in both training and testing time."""
        # Only the first ``num_inputs`` feature levels feed the extractor.
        feats = x[:self.bbox_roi_extractor.num_inputs]
        cls_feats = self.bbox_roi_extractor(feats, rois)
        # Regression features come from enlarged RoIs (the "double head").
        reg_feats = self.bbox_roi_extractor(
            feats, rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            cls_feats = self.shared_head(cls_feats)
            reg_feats = self.shared_head(reg_feats)
        cls_score, bbox_pred = self.bbox_head(cls_feats, reg_feats)
        return dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=cls_feats)
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
    """Mask head for `SCNet `_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if self.conv_to_res:
            # Each residual block replaces two plain 3x3 convs.
            assert self.conv_kernel_size == 3
            self.num_res_blocks = self.num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                self.in_channels,
                self.conv_out_channels,
                self.num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
9 | 10 | Args: 11 | conv_to_res (bool, optional): if True, change the conv layers to 12 | ``SimplifiedBasicBlock``. 13 | """ 14 | 15 | def __init__(self, conv_to_res=True, **kwargs): 16 | super(SCNetSemanticHead, self).__init__(**kwargs) 17 | self.conv_to_res = conv_to_res 18 | if self.conv_to_res: 19 | num_res_blocks = self.num_convs // 2 20 | self.convs = ResLayer( 21 | SimplifiedBasicBlock, 22 | self.in_channels, 23 | self.conv_out_channels, 24 | num_res_blocks, 25 | conv_cfg=self.conv_cfg, 26 | norm_cfg=self.norm_cfg) 27 | self.num_convs = num_res_blocks 28 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from .generic_roi_extractor import GenericRoIExtractor 2 | from .single_level_roi_extractor import SingleRoIExtractor 3 | 4 | __all__ = [ 5 | 'SingleRoIExtractor', 6 | 'GenericRoIExtractor', 7 | ] 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/roi_heads/shared_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .res_layer import ResLayer 2 | 3 | __all__ = ['ResLayer'] 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_positional_encoding, build_transformer 2 | from .gaussian_target import gaussian_radius, gen_gaussian_target 3 | from .positional_encoding import (LearnedPositionalEncoding, 4 | SinePositionalEncoding) 5 | from .res_layer import ResLayer, SimplifiedBasicBlock 6 | from .transformer import (FFN, DynamicConv, MultiheadAttention, Transformer, 7 | TransformerDecoder, TransformerDecoderLayer, 8 | TransformerEncoder, 
# Registries for pluggable transformer and positional-encoding modules.
TRANSFORMER = Registry('Transformer')
POSITIONAL_ENCODING = Registry('Position encoding')


def build_transformer(cfg, default_args=None):
    """Builder for Transformer.

    Args:
        cfg (dict): Config dict whose ``type`` key names a class registered
            in ``TRANSFORMER``.
        default_args (dict, optional): Default kwargs merged into ``cfg``.

    Returns:
        The constructed transformer module.
    """
    return build_from_cfg(cfg, TRANSFORMER, default_args)


def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding.

    Args:
        cfg (dict): Config dict whose ``type`` key names a class registered
            in ``POSITIONAL_ENCODING``.
        default_args (dict, optional): Default kwargs merged into ``cfg``.

    Returns:
        The constructed positional encoding module.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
if sys.version_info >= (3, 7):

    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation.
        """
        # No-op context when disabled or when CUDA is unavailable.
        if not enabled or not torch.cuda.is_available():
            yield
            return
        stream = stream or torch.cuda.current_stream()
        end_stream = end_stream or stream
        begin_evt = torch.cuda.Event(enable_timing=True)
        end_evt = torch.cuda.Event(enable_timing=True)
        stream.record_event(begin_evt)
        try:
            wall_start = time.monotonic()
            yield
        finally:
            # Measure even if the wrapped code raised.
            wall_end = time.monotonic()
            end_stream.record_event(end_evt)
            end_evt.synchronize()
            cpu_time = (wall_end - wall_start) * 1000
            gpu_time = begin_evt.elapsed_time(end_evt)
            msg = (f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
                   f'gpu_time {gpu_time:.2f} ms stream {stream}')
            print(msg, end_stream)
def parse_version_info(version_str):
    """Parse a dotted version string into a tuple.

    Numeric components become ints; a release-candidate component such as
    ``"0rc1"`` becomes the int part followed by an ``'rcN'`` string.

    Args:
        version_str (str): Version string, e.g. ``"2.11.0"`` or ``"1.0rc1"``.

    Returns:
        tuple: Mixed ints and ``'rcN'`` strings.
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            major, candidate = token.split('rc')[:2]
            parts.append(int(major))
            parts.append(f'rc{candidate}')
    return tuple(parts)
def train_model(model,
                dataset,
                cfg,
                distributed=False,
                validate=False,
                timestamp=None,
                meta=None):
    """A function wrapper for launching model training according to cfg.

    Because we need different eval_hook in runner. Should be deprecated in
    the future.
    """
    # 3D segmentors need mmseg's training loop; everything else trains as
    # a detector.
    if cfg.model.type in ['EncoderDecoder3D']:
        launcher = train_segmentor
    else:
        launcher = train_detector
    launcher(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=validate,
        timestamp=timestamp,
        meta=meta)
(AlignedAnchor3DRangeGenerator, 3 | AlignedAnchor3DRangeGeneratorPerCls, 4 | Anchor3DRangeGenerator) 5 | 6 | __all__ = [ 7 | 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator', 8 | 'build_anchor_generator', 'AlignedAnchor3DRangeGeneratorPerCls' 9 | ] 10 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | from mmdet.core.bbox import AssignResult, BaseAssigner, MaxIoUAssigner 2 | 3 | __all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/bbox/coders/__init__.py: -------------------------------------------------------------------------------- 1 | from mmdet.core.bbox import build_bbox_coder 2 | from .anchor_free_bbox_coder import AnchorFreeBBoxCoder 3 | from .centerpoint_bbox_coders import CenterPointBBoxCoder 4 | from .delta_xyzwhlr_bbox_coder import DeltaXYZWLHRBBoxCoder 5 | from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder 6 | 7 | __all__ = [ 8 | 'build_bbox_coder', 'DeltaXYZWLHRBBoxCoder', 'PartialBinBasedBBoxCoder', 9 | 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | from .iou3d_calculator import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, 2 | BboxOverlapsNearest3D, 3 | axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, 4 | bbox_overlaps_nearest_3d) 5 | 6 | __all__ = [ 7 | 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d', 8 | 'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D', 9 | 'axis_aligned_bbox_overlaps_3d' 10 | ] 11 | 
-------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from mmdet.core.bbox.samplers import (BaseSampler, CombinedSampler, 2 | InstanceBalancedPosSampler, 3 | IoUBalancedNegSampler, OHEMSampler, 4 | PseudoSampler, RandomSampler, 5 | SamplingResult) 6 | from .iou_neg_piecewise_sampler import IoUNegPiecewiseSampler 7 | 8 | __all__ = [ 9 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 10 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 11 | 'OHEMSampler', 'SamplingResult', 'IoUNegPiecewiseSampler' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/bbox/structures/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_box3d import BaseInstance3DBoxes 2 | from .box_3d_mode import Box3DMode 3 | from .cam_box3d import CameraInstance3DBoxes 4 | from .coord_3d_mode import Coord3DMode 5 | from .depth_box3d import DepthInstance3DBoxes 6 | from .lidar_box3d import LiDARInstance3DBoxes 7 | from .utils import (get_box_type, limit_period, points_cam2img, 8 | rotation_3d_in_axis, xywhr2xyxyr) 9 | 10 | __all__ = [ 11 | 'Box3DMode', 'BaseInstance3DBoxes', 'LiDARInstance3DBoxes', 12 | 'CameraInstance3DBoxes', 'DepthInstance3DBoxes', 'xywhr2xyxyr', 13 | 'get_box_type', 'rotation_3d_in_axis', 'limit_period', 'points_cam2img', 14 | 'Coord3DMode' 15 | ] 16 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .indoor_eval import indoor_eval 2 | from .kitti_utils import kitti_eval, kitti_eval_coco_style 3 | from .lyft_eval import lyft_eval 4 | from .seg_eval import 
def get_points_type(points_type):
    """Get the class of points according to coordinate type.

    Args:
        points_type (str): The type of points coordinate.
            The valid value are "CAMERA", "LIDAR", or "DEPTH".

    Returns:
        class: Points type.

    Raises:
        ValueError: If ``points_type`` is not one of the supported values.
    """
    if points_type == 'CAMERA':
        return CameraPoints
    if points_type == 'LIDAR':
        return LiDARPoints
    if points_type == 'DEPTH':
        return DepthPoints
    raise ValueError('Only "points_type" of "CAMERA", "LIDAR", or "DEPTH"'
                     f' are supported, got {points_type}')
def build_voxel_generator(cfg, **kwargs):
    """Builder of voxel generator.

    Args:
        cfg (VoxelGenerator | dict): An existing generator (returned as-is)
            or a config dict used to construct one via
            ``mmcv.runner.obj_from_dict``.
        **kwargs: Default arguments forwarded to ``obj_from_dict``.

    Returns:
        VoxelGenerator: The voxel generator instance.

    Raises:
        TypeError: If ``cfg`` is neither a ``VoxelGenerator`` nor a dict.
    """
    if isinstance(cfg, voxel_generator.VoxelGenerator):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(
            cfg, voxel_generator, default_args=kwargs)
    else:
        # Fixed copy-paste error: the message previously said "sampler".
        raise TypeError('Invalid type {} for building a voxel generator'
                        .format(type(cfg)))
'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D', 18 | 'Compose', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile', 19 | 'DefaultFormatBundle', 'DefaultFormatBundle3D', 'DataBaseSampler', 20 | 'NormalizePointsColor', 'LoadAnnotations3D', 'IndoorPointSample', 21 | 'PointSegClassMapping', 'MultiScaleFlipAug3D', 'LoadPointsFromMultiSweeps', 22 | 'BackgroundPointsFilter', 'VoxelBasedPointSampler', 'GlobalAlignment', 23 | 'IndoorPatchPointSample', 'LoadImageFromFileMono3D' 24 | ] 25 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet3d/models/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (FUSION_LAYERS, MIDDLE_ENCODERS, VOXEL_ENCODERS, 3 | build_backbone, build_detector, build_fusion_layer, 4 | build_head, build_loss, build_middle_encoder, 5 | build_model, build_neck, build_roi_extractor, 6 | build_shared_head, build_voxel_encoder) 7 | from .decode_heads import * # noqa: F401,F403 8 | from .dense_heads import * # noqa: F401,F403 9 | from .detectors import * # noqa: F401,F403 10 | from .fusion_layers import * # noqa: F401,F403 11 | from .losses import * # noqa: F401,F403 12 | from .middle_encoders import * # noqa: F401,F403 13 | from .model_utils import * # noqa: F401,F403 14 | from .necks import * # noqa: F401,F403 15 | from .roi_heads import * # noqa: F401,F403 16 | from .segmentors import * # noqa: F401,F403 17 | from .voxel_encoders import * # noqa: F401,F403 18 | 19 | __all__ = [ 20 | 
class BasePointNet(nn.Module, metaclass=ABCMeta):
    """Base class for PointNet backbones."""

    def __init__(self):
        super(BasePointNet, self).__init__()
        self.fp16_enabled = False

    def init_weights(self, pretrained=None):
        """Initialize the weights of PointNet backbone.

        Args:
            pretrained (str, optional): Checkpoint path to load from.
                Non-string values leave the default initialization alone.
        """
        # Do not initialize the conv layers
        # to follow the original implementation
        if isinstance(pretrained, str):
            from mmdet3d.utils import get_root_logger
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)

    @staticmethod
    def _split_point_feats(points):
        """Split coordinates and features of input points.

        Args:
            points (torch.Tensor): Point coordinates with features,
                with shape (B, N, 3 + input_feature_dim).

        Returns:
            torch.Tensor: Coordinates of input points.
            torch.Tensor: Features of input points, or None when the
                points carry no extra channels.
        """
        xyz = points[..., :3].contiguous()
        features = None
        if points.size(-1) > 3:
            # Channels-first layout (B, C, N) for downstream conv layers.
            features = points[..., 3:].transpose(1, 2).contiguous()
        return xyz, features
from mmdet.models.builder import DETECTORS
from .single_stage_mono3d import SingleStageMono3DDetector


@DETECTORS.register_module()
class FCOSMono3D(SingleStageMono3DDetector):
    r"""FCOS3D detector for monocular 3D object detection.

    NOTE(review): the original docstring's rST links were broken by
    formatting; the paper is presumably "FCOS3D: Fully Convolutional
    One-Stage Monocular 3D Object Detection" — confirm the intended URL.

    Args:
        backbone (dict): Config of the image feature-extraction backbone.
        neck (dict): Config of the neck built on the backbone.
        bbox_head (dict): Config of the monocular 3D bounding-box head.
        train_cfg (dict, optional): Training config. Defaults to None.
        test_cfg (dict, optional): Testing config. Defaults to None.
        pretrained (str, optional): Checkpoint path for weight
            initialization. Defaults to None.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        # Thin registration wrapper: all behavior lives in the parent
        # single-stage monocular detector.
        super(FCOSMono3D, self).__init__(backbone, neck, bbox_head, train_cfg,
                                         test_cfg, pretrained)
class FlowEmbedding(nn.Module):
    """Flow embedding layer.

    Correlates each point of the first cloud with its k nearest neighbours
    in the second cloud (via kNN grouping) and mixes the paired features
    through a shared MLP, max-pooling over the neighbourhood.
    """

    def __init__(self, num_samples, in_channels, out_channels):
        super(FlowEmbedding, self).__init__()

        self.num_samples = num_samples
        # Radius argument is unused in kNN mode, hence None.
        self.group = Group(None, num_samples, knn=True)

        # Shared MLP input: grouped features from cloud 2 plus tiled
        # features from cloud 1 (2 * in_channels) plus, presumably, 3
        # relative-coordinate channels added by Group — confirm in Group.
        channels = [2 * in_channels + 3] + list(out_channels)
        mlp = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            mlp.append(nn.Conv2d(c_in, c_out, 1, bias=True))
            mlp.append(nn.BatchNorm2d(c_out, eps=0.001))
            mlp.append(nn.ReLU())
        self.conv = nn.Sequential(*mlp)

    def forward(self, points1, points2, features1, features2):
        # Group cloud-2 features around cloud-1 points.
        grouped = self.group(points2, points1, features2)
        # Tile each cloud-1 feature across its num_samples neighbours.
        tiled = features1.unsqueeze(3).expand(-1, -1, -1, self.num_samples)
        fused = torch.cat([grouped, tiled], dim=1)
        fused = self.conv(fused)
        # Max-pool over the neighbourhood dimension.
        return fused.max(dim=3)[0]
class SetConv(nn.Module):
    """Set-convolution layer in the PointNet++ style.

    Downsamples the cloud with furthest point sampling, groups neighbours
    around each sampled centroid, applies a shared MLP, and max-pools each
    neighbourhood into a single feature vector.
    """

    def __init__(self, num_points, radius, num_samples, in_channels, out_channels):
        super(SetConv, self).__init__()

        self.sample = Sample(num_points)
        self.group = Group(radius, num_samples)

        # MLP input is in_channels plus 3 extra channels — presumably the
        # relative xyz appended by Group; confirm against Group.
        channels = [in_channels + 3] + list(out_channels)
        mlp = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            mlp.append(nn.Conv2d(c_in, c_out, 1, bias=True))
            mlp.append(nn.BatchNorm2d(c_out, eps=0.001))
            mlp.append(nn.ReLU())
        self.conv = nn.Sequential(*mlp)

    def forward(self, points, features):
        centroids = self.sample(points)
        grouped = self.group(points, centroids, features)
        # Shared MLP followed by max-pooling over each neighbourhood.
        pooled = self.conv(grouped).max(dim=3)[0]
        return centroids, pooled
class Sample(nn.Module):
    """Downsample a point cloud to ``num_points`` via furthest point sampling."""

    def __init__(self, num_points):
        super(Sample, self).__init__()
        self.num_points = num_points

    def forward(self, points):
        # The permute before FPS suggests `points` arrives channels-first
        # (B, 3, N) while the kaolin sampler expects (B, N, 3) — confirm
        # against callers.
        indices = furthest_point_sampling(
            points.permute(0, 2, 1).contiguous(), self.num_points)
        # Gather the selected points from the original layout.
        return fps_gather_by_index(points.contiguous(), indices)
-------------------------------------------------------------------------------- 1 | from .pillar_scatter import PointPillarsScatter 2 | from .sparse_encoder import SparseEncoder 3 | from .sparse_unet import SparseUNet 4 | 5 | __all__ = ['PointPillarsScatter', 'SparseEncoder', 'SparseUNet'] 6 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/model_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .vote_module import VoteModule 2 | 3 | __all__ = ['VoteModule'] 4 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.necks.fpn import FPN 2 | from .second_fpn import SECONDFPN 3 | 4 | __all__ = ['FPN', 'SECONDFPN'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_3droi_head import Base3DRoIHead 2 | from .bbox_heads import PartA2BboxHead 3 | from .h3d_roi_head import H3DRoIHead 4 | from .mask_heads import PointwiseSemanticHead, PrimitiveHead 5 | from .part_aggregation_roi_head import PartAggregationROIHead 6 | from .roi_extractors import Single3DRoIAwareExtractor, SingleRoIExtractor 7 | 8 | __all__ = [ 9 | 'Base3DRoIHead', 'PartAggregationROIHead', 'PointwiseSemanticHead', 10 | 'Single3DRoIAwareExtractor', 'PartA2BboxHead', 'SingleRoIExtractor', 11 | 'H3DRoIHead', 'PrimitiveHead' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from 
mmdet.models.roi_heads.bbox_heads import (BBoxHead, ConvFCBBoxHead, 2 | DoubleConvFCBBoxHead, 3 | Shared2FCBBoxHead, 4 | Shared4Conv1FCBBoxHead) 5 | from .h3d_bbox_head import H3DBboxHead 6 | from .parta2_bbox_head import PartA2BboxHead 7 | 8 | __all__ = [ 9 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 10 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'PartA2BboxHead', 11 | 'H3DBboxHead' 12 | ] 13 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/roi_heads/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .pointwise_semantic_head import PointwiseSemanticHead 2 | from .primitive_head import PrimitiveHead 3 | 4 | __all__ = ['PointwiseSemanticHead', 'PrimitiveHead'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor 2 | from .single_roiaware_extractor import Single3DRoIAwareExtractor 3 | 4 | __all__ = ['SingleRoIExtractor', 'Single3DRoIAwareExtractor'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Base3DSegmentor 2 | from .encoder_decoder import EncoderDecoder3D 3 | 4 | __all__ = ['Base3DSegmentor', 'EncoderDecoder3D'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip_sigmoid import clip_sigmoid 2 | from .mlp import MLP 3 | 4 | __all__ = ['clip_sigmoid', 
'MLP'] 5 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/utils/clip_sigmoid.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def clip_sigmoid(x, eps=1e-4): 5 | """Sigmoid function for input feature. 6 | 7 | Args: 8 | x (torch.Tensor): Input feature map with the shape of [B, N, H, W]. 9 | eps (float): Lower bound of the range to be clamped to. Defaults 10 | to 1e-4. 11 | 12 | Returns: 13 | torch.Tensor: Feature map after sigmoid. 14 | """ 15 | y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) 16 | return y 17 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/voxel_encoders/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emecercelik/ssl-3d-detection/958b21e7e730bfac0a5caa5378b0a3d845fd6891/mmdetection3d/mmdetection3d/mmdet3d/models/voxel_encoders/.DS_Store -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/models/voxel_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | from .pillar_encoder import PillarFeatureNet 2 | from .voxel_encoder import DynamicSimpleVFE, DynamicVFE, HardSimpleVFE, HardVFE 3 | 4 | __all__ = [ 5 | 'PillarFeatureNet', 'HardVFE', 'DynamicVFE', 'HardSimpleVFE', 6 | 'DynamicSimpleVFE' 7 | ] 8 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/ops/ball_query/__init__.py: -------------------------------------------------------------------------------- 1 | from .ball_query import ball_query 2 | 3 | __all__ = ['ball_query'] 4 | -------------------------------------------------------------------------------- 
import torch


def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    """Calculate pairwise (squared or normalized) distance between two sets.

    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (bool): Whether to normalize the distance: when True the
            square root of the squared distance is divided by C.
            Default: True.

    Returns:
        Tensor: (B, N, M) Distance between each pair of points
            (squared distance when ``norm`` is False).
    """
    num_channel = point_feat_a.shape[-1]
    # ||a||^2 -> (B, N, 1); ||b||^2 -> (B, 1, M). Broadcasting in the sum
    # below replaces the explicit .repeat() of the original — same result,
    # without materializing (B, N, M) intermediates twice.
    a_square = torch.sum(point_feat_a.pow(2), dim=-1, keepdim=True)
    b_square = torch.sum(point_feat_b.pow(2), dim=-1, keepdim=True).transpose(1, 2)

    # Inner products a.b -> (B, N, M).
    coor = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))

    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. Bug fix: floating-point
    # cancellation can make this marginally negative for (near-)identical
    # points, so clamp at 0 — otherwise the sqrt below yields NaN.
    dist = (a_square + b_square - 2 * coor).clamp_(min=0)
    if norm:
        dist = torch.sqrt(dist) / num_channel
    return dist
import torch
from torch.autograd import Function
from typing import Tuple

from . import interpolate_ext


class ThreeNN(Function):
    """Autograd wrapper around the compiled three-nearest-neighbour kernel."""

    @staticmethod
    def forward(ctx, target: torch.Tensor,
                source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Find the top-3 nearest neighbors of the target set from the source
        set.

        Args:
            target (Tensor): shape (B, N, 3), points set that needs to
                find the nearest neighbors.
            source (Tensor): shape (B, M, 3), points set that is used
                to find the nearest neighbors of points in target set.

        Returns:
            Tensor: shape (B, N, 3), L2 distance of each point in the
                target set to its 3 nearest neighbors in the source set.
            Tensor: shape (B, N, 3), int32 indices of those neighbors in
                the source set (marked non-differentiable).
        """
        assert target.is_contiguous()
        assert source.is_contiguous()

        B, N, _ = target.size()
        m = source.size(1)
        # NOTE(review): torch.cuda.* allocation makes this CUDA-only; the
        # extension kernel presumably writes squared distances into dist2
        # (hence the sqrt below) — confirm against the C++ wrapper.
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)

        interpolate_ext.three_nn_wrapper(B, N, m, target, source, dist2, idx)

        # Integer indices carry no gradient.
        ctx.mark_non_differentiable(idx)

        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbour search is treated as non-differentiable.
        return None, None


three_nn = ThreeNN.apply
from mmcv.utils import Registry

SA_MODULES = Registry('point_sa_module')


def build_sa_module(cfg, *args, **kwargs):
    """Build a PointNet2 set abstraction (SA) module.

    Args:
        cfg (None or dict): The SA module config, which should contain:
            - type (str): Module type.
            - module args: Args needed to instantiate an SA module.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding module.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding SA module.

    Returns:
        nn.Module: Created SA module.
    """
    # Validate the config up front with guard clauses; None selects the
    # default module type.
    if cfg is None:
        options = dict(type='PointSAModule')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        # Copy so popping 'type' does not mutate the caller's config.
        options = cfg.copy()

    module_type = options.pop('type')
    if module_type not in SA_MODULES:
        raise KeyError(f'Unrecognized module type {module_type}')
    module_cls = SA_MODULES.get(module_type)

    # Remaining config entries become constructor keyword arguments.
    return module_cls(*args, **kwargs, **options)
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from .conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d, 16 | SparseConvTranspose3d, SparseInverseConv2d, 17 | SparseInverseConv3d, SubMConv2d, SubMConv3d) 18 | from .modules import SparseModule, SparseSequential 19 | from .pool import SparseMaxPool2d, SparseMaxPool3d 20 | from .structure import SparseConvTensor, scatter_nd 21 | 22 | __all__ = [ 23 | 'SparseConv2d', 24 | 'SparseConv3d', 25 | 'SubMConv2d', 26 | 'SubMConv3d', 27 | 'SparseConvTranspose2d', 28 | 'SparseConvTranspose3d', 29 | 'SparseInverseConv2d', 30 | 'SparseInverseConv3d', 31 | 'SparseModule', 32 | 'SparseSequential', 33 | 'SparseMaxPool2d', 34 | 'SparseMaxPool3d', 35 | 'SparseConvTensor', 36 | 'scatter_nd', 37 | ] 38 | -------------------------------------------------------------------------------- /mmdetection3d/mmdetection3d/mmdet3d/ops/spconv/include/spconv/mp_helper.h: -------------------------------------------------------------------------------- 1 | #ifndef MP_HELPER_H_ 2 | #define MP_HELPER_H_ 3 | #include 4 | #include 5 | 6 | namespace spconv { 7 | template 8 | struct mp_list {}; 9 | 10 | template 11 | using mp_list_c = mp_list...>; 12 | 13 | namespace detail { 14 | 15 | template 16 | constexpr F mp_for_each_impl(mp_list, F &&f) { 17 | return std::initializer_list{(f(T()), 0)...}, std::forward(f); 18 | } 19 | 20 | template 21 | constexpr F mp_for_each_impl(mp_list<>, F &&f) { 22 | return std::forward(f); 23 | } 24 | 25 | } // namespace detail 26 | 27 | namespace detail { 28 | 29 | template class B> 30 | struct mp_rename_impl { 31 
| // An error "no type named 'type'" here means that the first argument to 32 | // mp_rename is not a list 33 | }; 34 | 35 | template