├── .circleci
│   ├── config.yml
│   ├── docker
│   │   └── Dockerfile
│   └── test.yml
├── .dev_scripts
│   ├── benchmark_full_models.txt
│   ├── benchmark_options.py
│   ├── benchmark_train_models.txt
│   ├── covignore.cfg
│   ├── diff_coverage_test.sh
│   ├── gather_models.py
│   ├── gen_benchmark_script.py
│   ├── linter.sh
│   ├── test_benchmark.sh
│   └── train_benchmark.sh
├── .github
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── ISSUE_TEMPLATE
│   │   ├── 1-bug-report.yml
│   │   ├── 2-feature_request.yml
│   │   ├── 3-new-model.yml
│   │   ├── 4-documentation.yml
│   │   └── config.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── deploy.yml
│       ├── lint.yml
│       ├── merge_stage_test.yml
│       ├── pr_stage_test.yml
│       └── test_mim.yml
├── .gitignore
├── .pre-commit-config-zh-cn.yaml
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── CITATION.cff
├── LICENSE
├── MANIFEST.in
├── README.md
├── README_zh-CN.md
├── configs
├── 3dssd
│ ├── 3dssd_4xb4_kitti-3d-car.py
│ ├── README.md
│ └── metafile.yml
├── _base_
│ ├── datasets
│ │ ├── kitti-3d-3class.py
│ │ ├── kitti-3d-car.py
│ │ ├── kitti-mono3d.py
│ │ ├── lyft-3d-range100.py
│ │ ├── lyft-3d.py
│ │ ├── nuim-instance.py
│ │ ├── nus-3d.py
│ │ ├── nus-mono3d.py
│ │ ├── s3dis-3d.py
│ │ ├── s3dis-seg.py
│ │ ├── scannet-3d.py
│ │ ├── scannet-seg.py
│ │ ├── semantickitti.py
│ │ ├── sunrgbd-3d.py
│ │ ├── waymoD3-fov-mono3d-3class.py
│ │ ├── waymoD3-mv-mono3d-3class.py
│ │ ├── waymoD5-3d-3class.py
│ │ ├── waymoD5-3d-car.py
│ │ ├── waymoD5-fov-mono3d-3class.py
│ │ ├── waymoD5-mv-mono3d-3class.py
│ │ └── waymoD5-mv3d-3class.py
│ ├── default_runtime.py
│ ├── models
│ │ ├── 3dssd.py
│ │ ├── cascade-mask-rcnn_r50_fpn.py
│ │ ├── centerpoint_pillar02_second_secfpn_nus.py
│ │ ├── centerpoint_voxel01_second_secfpn_nus.py
│ │ ├── cylinder3d.py
│ │ ├── dgcnn.py
│ │ ├── fcaf3d.py
│ │ ├── fcos3d.py
│ │ ├── groupfree3d.py
│ │ ├── h3dnet.py
│ │ ├── imvotenet.py
│ │ ├── mask-rcnn_r50_fpn.py
│ │ ├── minkunet.py
│ │ ├── multiview_dfm.py
│ │ ├── paconv_ssg-cuda.py
│ │ ├── paconv_ssg.py
│ │ ├── parta2.py
│ │ ├── pgd.py
│ │ ├── point_rcnn.py
│ │ ├── pointnet2_msg.py
│ │ ├── pointnet2_ssg.py
│ │ ├── pointpillars_hv_fpn_lyft.py
│ │ ├── pointpillars_hv_fpn_nus.py
│ │ ├── pointpillars_hv_fpn_range100_lyft.py
│ │ ├── pointpillars_hv_secfpn_kitti.py
│ │ ├── pointpillars_hv_secfpn_waymo.py
│ │ ├── second_hv_secfpn_kitti.py
│ │ ├── second_hv_secfpn_waymo.py
│ │ ├── smoke.py
│ │ ├── spvcnn.py
│ │ └── votenet.py
│ └── schedules
│ │ ├── cosine.py
│ │ ├── cyclic-20e.py
│ │ ├── cyclic-40e.py
│ │ ├── mmdet-schedule-1x.py
│ │ ├── schedule-2x.py
│ │ ├── schedule-3x.py
│ │ ├── seg-cosine-100e.py
│ │ ├── seg-cosine-150e.py
│ │ ├── seg-cosine-200e.py
│ │ └── seg-cosine-50e.py
├── benchmark
│ ├── hv_PartA2_secfpn_4x8_cyclic_80e_pcdet_kitti-3d-3class.py
│ ├── hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py
│ ├── hv_pointpillars_secfpn_4x8_80e_pcdet_kitti-3d-3class.py
│ └── hv_second_secfpn_4x8_80e_pcdet_kitti-3d-3class.py
├── centerpoint
│ ├── README.md
│ ├── centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-flip-tta-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-flip-tta-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-tta-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py
│ ├── centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py
│ └── metafile.yml
├── cylinder3d
│ ├── README.md
│ ├── cylinder3d_4xb4-3x_semantickitti.py
│ ├── cylinder3d_8xb2-laser-polar-mix-3x_semantickitti.py
│ └── metafile.yml
├── dgcnn
│ ├── README.md
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py
│ ├── dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py
│ └── metafile.yml
├── dynamic_voxelization
│ ├── README.md
│ ├── metafile.yml
│ ├── pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py
│ ├── second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py
│ └── second_dv_secfpn_8xb6-80e_kitti-3d-car.py
├── fcaf3d
│ ├── README.md
│ ├── fcaf3d_2xb8_s3dis-3d-5class.py
│ ├── fcaf3d_2xb8_scannet-3d-18class.py
│ ├── fcaf3d_2xb8_sunrgbd-3d-10class.py
│ └── metafile.yml
├── fcos3d
│ ├── README.md
│ ├── fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py
│ ├── fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py
│ └── metafile.yml
├── free_anchor
│ ├── README.md
│ ├── metafile.yml
│ ├── pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py
│ ├── pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-strong-aug-3x_nus-3d.py
│ └── pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py
├── groupfree3d
│ ├── README.md
│ ├── groupfree3d_head-L12-O256_4xb8_scannet-seg.py
│ ├── groupfree3d_head-L6-O256_4xb8_scannet-seg.py
│ ├── groupfree3d_w2x-head-L12-O256_4xb8_scannet-seg.py
│ ├── groupfree3d_w2x-head-L12-O512_4xb8_scannet-seg.py
│ └── metafile.yml
├── h3dnet
│ ├── README.md
│ ├── h3dnet_8xb3_scannet-seg.py
│ └── metafile.yml
├── imvotenet
│ ├── README.md
│ ├── imvotenet_faster-rcnn-r50_fpn_4xb2_sunrgbd-3d.py
│ ├── imvotenet_stage2_8xb16_sunrgbd-3d.py
│ └── metafile.yml
├── imvoxelnet
│ ├── README.md
│ ├── imvoxelnet_2xb4_sunrgbd-3d-10class.py
│ ├── imvoxelnet_8xb4_kitti-3d-car.py
│ └── metafile.yml
├── minkunet
│ ├── README.md
│ ├── metafile.yml
│ ├── minkunet18_w16_torchsparse_8xb2-amp-15e_semantickitti.py
│ ├── minkunet18_w20_torchsparse_8xb2-amp-15e_semantickitti.py
│ ├── minkunet18_w32_torchsparse_8xb2-amp-15e_semantickitti.py
│ ├── minkunet34_w32_minkowski_8xb2-laser-polar-mix-3x_semantickitti.py
│ ├── minkunet34_w32_spconv_8xb2-amp-laser-polar-mix-3x_semantickitti.py
│ ├── minkunet34_w32_spconv_8xb2-laser-polar-mix-3x_semantickitti.py
│ ├── minkunet34_w32_torchsparse_8xb2-amp-laser-polar-mix-3x_semantickitti.py
│ ├── minkunet34_w32_torchsparse_8xb2-laser-polar-mix-3x_semantickitti.py
│ └── minkunet34v2_w32_torchsparse_8xb2-amp-laser-polar-mix-3x_semantickitti.py
├── monoflex
│ ├── README.md
│ └── metafile.yml
├── mvfcos3d
│ ├── README.md
│ ├── multiview-fcos3d_r101-dcn_8xb2_waymoD5-3d-3class.py
│ └── multiview-fcos3d_r101-dcn_centerhead_16xb2_waymoD5-3d-3class.py
├── mvxnet
│ ├── README.md
│ ├── metafile.yml
│ └── mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py
├── nuimages
│ ├── README.md
│ ├── cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py
│ ├── cascade-mask-rcnn_r101_fpn_1x_nuim.py
│ ├── cascade-mask-rcnn_r50_fpn_1x_nuim.py
│ ├── cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py
│ ├── cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py
│ ├── htc_r50_fpn_1x_nuim.py
│ ├── htc_r50_fpn_coco-20e-1x_nuim.py
│ ├── htc_r50_fpn_coco-20e_nuim.py
│ ├── htc_r50_fpn_head-without-semantic_1x_nuim.py
│ ├── htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py
│ ├── mask-rcnn_r101_fpn_1x_nuim.py
│ ├── mask-rcnn_r50_caffe_fpn_1x_nuim.py
│ ├── mask-rcnn_r50_caffe_fpn_coco-3x_1x_nuim.py
│ ├── mask-rcnn_r50_caffe_fpn_coco-3x_20e_nuim.py
│ ├── mask-rcnn_r50_fpn_1x_nuim.py
│ ├── mask-rcnn_r50_fpn_coco-2x_1x_nuim.py
│ ├── mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py
│ ├── mask-rcnn_x101_32x4d_fpn_1x_nuim.py
│ └── metafile.yml
├── paconv
│ ├── README.md
│ ├── metafile.yml
│ ├── paconv_ssg-cuda_8xb8-cosine-200e_s3dis-seg.py
│ └── paconv_ssg_8xb8-cosine-150e_s3dis-seg.py
├── parta2
│ ├── README.md
│ ├── metafile.yml
│ ├── parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-3class.py
│ └── parta2_hv_secfpn_8xb2-cyclic-80e_kitti-3d-car.py
├── pgd
│ ├── README.md
│ ├── metafile.yml
│ ├── pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py
│ ├── pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py
│ ├── pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py
│ ├── pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py
│ ├── pgd_r101-caffe_fpn_head-gn_4xb3-4x_kitti-mono3d.py
│ ├── pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py
│ ├── pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py
│ ├── pgd_r101_fpn_gn-head_dcn_8xb3-2x_waymoD3-fov-mono3d.py
│ └── pgd_r101_fpn_gn-head_dcn_8xb3-2x_waymoD3-mv-mono3d.py
├── point_rcnn
│ ├── README.md
│ ├── metafile.yml
│ └── point-rcnn_8xb2_kitti-3d-3class.py
├── pointnet2
│ ├── README.md
│ ├── metafile.yml
│ ├── pointnet2_msg_2xb16-cosine-250e_scannet-seg-xyz-only.py
│ ├── pointnet2_msg_2xb16-cosine-250e_scannet-seg.py
│ ├── pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py
│ ├── pointnet2_ssg_2xb16-cosine-200e_scannet-seg-xyz-only.py
│ ├── pointnet2_ssg_2xb16-cosine-200e_scannet-seg.py
│ └── pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py
├── pointpillars
│ ├── README.md
│ ├── metafile.yml
│ ├── pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py
│ ├── pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py
│ ├── pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py
│ ├── pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py
│ ├── pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py
│ ├── pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py
│ ├── pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-car.py
│ ├── pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py
│ ├── pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-car.py
│ ├── pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d-range100.py
│ ├── pointpillars_hv_secfpn_sbn-all_8xb2-2x_lyft-3d.py
│ ├── pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py
│ └── pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py
├── pv_rcnn
│ ├── README.md
│ ├── metafile.yml
│ └── pv_rcnn_8xb2-80e_kitti-3d-3class.py
├── regnet
│ ├── README.md
│ ├── metafile.yml
│ ├── pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py
│ ├── pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py
│ ├── pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py
│ ├── pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb2-2x_lyft-3d.py
│ ├── pointpillars_hv_regnet-400mf_secfpn_sbn-all_8xb4-2x_nus-3d.py
│ └── pointpillars_hv_regnet-400mf_secfpn_sbn-all_range100_8xb2-2x_lyft-3d.py
├── sassd
│ ├── README.md
│ └── sassd_8xb6-80e_kitti-3d-3class.py
├── second
│ ├── README.md
│ ├── metafile.yml
│ ├── second_hv_secfpn_8xb6-80e_kitti-3d-3class.py
│ ├── second_hv_secfpn_8xb6-80e_kitti-3d-car.py
│ ├── second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py
│ ├── second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py
│ └── second_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py
├── smoke
│ ├── README.md
│ ├── metafile.yml
│ └── smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py
├── spvcnn
│ ├── README.md
│ ├── metafile.yml
│ ├── spvcnn_w16_8xb2-amp-15e_semantickitti.py
│ ├── spvcnn_w20_8xb2-amp-15e_semantickitti.py
│ ├── spvcnn_w32_8xb2-amp-15e_semantickitti.py
│ └── spvcnn_w32_8xb2-amp-laser-polar-mix-3x_semantickitti.py
├── ssn
│ ├── README.md
│ ├── metafile.yml
│ ├── ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py
│ ├── ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py
│ ├── ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py
│ └── ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py
└── votenet
│ ├── README.md
│ ├── metafile.yml
│ ├── votenet_8xb16_sunrgbd-3d.py
│ ├── votenet_8xb8_scannet-3d.py
│ └── votenet_head-iouloss_8xb8_scannet-3d.py
├── data
├── lyft
│ ├── test.txt
│ ├── train.txt
│ └── val.txt
├── s3dis
│ ├── README.md
│ ├── collect_indoor3d_data.py
│ ├── indoor3d_util.py
│ └── meta_data
│ │ ├── anno_paths.txt
│ │ └── class_names.txt
├── scannet
│ ├── README.md
│ ├── batch_load_scannet_data.py
│ ├── extract_posed_images.py
│ ├── load_scannet_data.py
│ ├── meta_data
│ │ ├── scannet_means.npz
│ │ ├── scannet_train.txt
│ │ ├── scannetv2-labels.combined.tsv
│ │ ├── scannetv2_test.txt
│ │ ├── scannetv2_train.txt
│ │ └── scannetv2_val.txt
│ └── scannet_utils.py
└── sunrgbd
│ ├── README.md
│ └── matlab
│ ├── extract_rgbd_data_v1.m
│ ├── extract_rgbd_data_v2.m
│ └── extract_split.m
├── dataset-index.yml
├── demo
├── data
│ ├── kitti
│ │ ├── 000008.bin
│ │ ├── 000008.pkl
│ │ ├── 000008.png
│ │ └── 000008.txt
│ ├── nuscenes
│ │ ├── n015-2018-07-24-11-22-45+0800.pkl
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_BACK_LEFT__1532402927647423.jpg
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_BACK_RIGHT__1532402927627893.jpg
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_FRONT_LEFT__1532402927604844.jpg
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_FRONT_RIGHT__1532402927620339.jpg
│ │ ├── n015-2018-07-24-11-22-45+0800__CAM_FRONT__1532402927612460.jpg
│ │ └── n015-2018-07-24-11-22-45+0800__LIDAR_TOP__1532402927647951.pcd.bin
│ ├── scannet
│ │ └── scene0000_00.bin
│ └── sunrgbd
│ │ ├── 000017.bin
│ │ ├── 000017.jpg
│ │ └── sunrgbd_000017_infos.pkl
├── inference_demo.ipynb
├── mono_det_demo.py
├── multi_modality_demo.py
├── pcd_demo.py
└── pcd_seg_demo.py
├── docker
├── Dockerfile
└── serve
│ ├── Dockerfile
│ ├── config.properties
│ └── entrypoint.sh
├── docs
├── en
│ ├── Makefile
│ ├── _static
│ │ ├── css
│ │ │ └── readthedocs.css
│ │ └── image
│ │ │ └── mmdet3d-logo.png
│ ├── advanced_guides
│ │ ├── customize_dataset.md
│ │ ├── customize_models.md
│ │ ├── customize_runtime.md
│ │ ├── datasets
│ │ │ ├── index.rst
│ │ │ ├── kitti.md
│ │ │ ├── lyft.md
│ │ │ ├── nuscenes.md
│ │ │ ├── s3dis.md
│ │ │ ├── scannet.md
│ │ │ ├── semantickitti.md
│ │ │ ├── sunrgbd.md
│ │ │ └── waymo.md
│ │ ├── index.rst
│ │ ├── pure_point_cloud_dataset.md
│ │ └── supported_tasks
│ │ │ ├── index.rst
│ │ │ ├── lidar_det3d.md
│ │ │ ├── lidar_sem_seg3d.md
│ │ │ └── vision_det3d.md
│ ├── api.rst
│ ├── conf.py
│ ├── get_started.md
│ ├── index.rst
│ ├── make.bat
│ ├── migration.md
│ ├── model_zoo.md
│ ├── notes
│ │ ├── benchmarks.md
│ │ ├── changelog.md
│ │ ├── changelog_v1.0.x.md
│ │ ├── compatibility.md
│ │ ├── contribution_guides.md
│ │ ├── faq.md
│ │ └── index.rst
│ ├── stat.py
│ ├── switch_language.md
│ └── user_guides
│ │ ├── backends_support.md
│ │ ├── config.md
│ │ ├── coord_sys_tutorial.md
│ │ ├── data_pipeline.md
│ │ ├── dataset_prepare.md
│ │ ├── index.rst
│ │ ├── inference.md
│ │ ├── model_deployment.md
│ │ ├── new_data_model.md
│ │ ├── train_test.md
│ │ ├── useful_tools.md
│ │ └── visualization.md
└── zh_cn
│ ├── Makefile
│ ├── _static
│ ├── css
│ │ └── readthedocs.css
│ └── image
│ │ └── mmdet3d-logo.png
│ ├── advanced_guides
│ ├── customize_dataset.md
│ ├── customize_models.md
│ ├── customize_runtime.md
│ ├── datasets
│ │ ├── index.rst
│ │ ├── kitti.md
│ │ ├── lyft.md
│ │ ├── nuscenes.md
│ │ ├── s3dis.md
│ │ ├── scannet.md
│ │ ├── semantickitti.md
│ │ ├── sunrgbd.md
│ │ └── waymo.md
│ ├── index.rst
│ └── supported_tasks
│ │ ├── index.rst
│ │ ├── lidar_det3d.md
│ │ ├── lidar_sem_seg3d.md
│ │ └── vision_det3d.md
│ ├── api.rst
│ ├── conf.py
│ ├── get_started.md
│ ├── index.rst
│ ├── make.bat
│ ├── model_zoo.md
│ ├── notes
│ ├── benchmarks.md
│ ├── changelog.md
│ ├── changelog_v1.0.x.md
│ ├── compatibility.md
│ ├── faq.md
│ └── index.rst
│ ├── stat.py
│ ├── switch_language.md
│ └── user_guides
│ ├── backends_support.md
│ ├── config.md
│ ├── coord_sys_tutorial.md
│ ├── data_pipeline.md
│ ├── dataset_prepare.md
│ ├── index.rst
│ ├── inference.md
│ ├── model_deployment.md
│ ├── new_data_model.md
│ ├── train_test.md
│ ├── useful_tools.md
│ └── visualization.md
├── mmdet3d
├── __init__.py
├── apis
│ ├── __init__.py
│ ├── inference.py
│ └── inferencers
│ │ ├── __init__.py
│ │ ├── base_3d_inferencer.py
│ │ ├── lidar_det3d_inferencer.py
│ │ ├── lidar_seg3d_inferencer.py
│ │ ├── mono_det3d_inferencer.py
│ │ └── multi_modality_det3d_inferencer.py
├── configs
│ ├── _base_
│ │ ├── datasets
│ │ │ ├── kitti_3d_3class.py
│ │ │ ├── kitti_3d_car.py
│ │ │ ├── kitti_mono3d.py
│ │ │ ├── lyft_3d.py
│ │ │ ├── lyft_3d_range100.py
│ │ │ ├── nuim_instance.py
│ │ │ ├── nus_3d.py
│ │ │ ├── nus_mono3d.py
│ │ │ ├── s3dis_3d.py
│ │ │ ├── s3dis_seg.py
│ │ │ ├── scannet_3d.py
│ │ │ ├── scannet_seg.py
│ │ │ ├── semantickitti.py
│ │ │ ├── sunrgbd_3d.py
│ │ │ ├── waymoD5_3d_3class.py
│ │ │ ├── waymoD5_3d_car.py
│ │ │ ├── waymoD5_fov_mono3d_3class.py
│ │ │ ├── waymoD5_mv3d_3class.py
│ │ │ └── waymoD5_mv_mono3d_3class.py
│ │ ├── default_runtime.py
│ │ ├── models
│ │ │ ├── centerpoint_pillar02_second_secfpn_nus.py
│ │ │ ├── centerpoint_voxel01_second_secfpn_nus.py
│ │ │ ├── cylinder3d.py
│ │ │ ├── fcos3d.py
│ │ │ ├── minkunet.py
│ │ │ ├── pgd.py
│ │ │ └── votenet.py
│ │ └── schedules
│ │ │ ├── cosine.py
│ │ │ ├── cyclic_20e.py
│ │ │ ├── cyclic_40e.py
│ │ │ ├── mmdet_schedule_1x.py
│ │ │ ├── schedule_2x.py
│ │ │ ├── schedule_3x.py
│ │ │ ├── seg_cosine_100e.py
│ │ │ ├── seg_cosine_150e.py
│ │ │ ├── seg_cosine_200e.py
│ │ │ └── seg_cosine_50e.py
│ ├── centerpoint
│ │ ├── centerpoint_pillar02_second_secfpn_8xb4_cyclic_20e_nus_3d.py
│ │ └── centerpoint_voxel01_second_secfpn_8xb4_cyclic_20e_nus_3d.py
│ ├── cylinder3d
│ │ ├── cylinder3d_4xb4-3x_semantickitti.py
│ │ └── cylinder3d_8xb2-laser-polar-mix-3x_semantickitti.py
│ ├── minkunet
│ │ └── minkunet34_w32_torchsparse_8xb2_laser_polar_mix_3x_semantickitti.py
│ ├── mvxnet
│ │ └── mvxnet_fpn_dv_second_secfpn_8xb2_80e_kitti_3d_3class.py
│ ├── pgd
│ │ └── pgd_r101_caffe_fpn_head_gn_4xb3_4x_kitti_mono3d.py
│ └── votenet
│ │ ├── __init__.py
│ │ └── votenet_8xb8_scannet_3d.py
├── datasets
│ ├── __init__.py
│ ├── convert_utils.py
│ ├── dataset_wrappers.py
│ ├── det3d_dataset.py
│ ├── kitti2d_dataset.py
│ ├── kitti_dataset.py
│ ├── lyft_dataset.py
│ ├── nuscenes_dataset.py
│ ├── s3dis_dataset.py
│ ├── scannet_dataset.py
│ ├── seg3d_dataset.py
│ ├── semantickitti_dataset.py
│ ├── sunrgbd_dataset.py
│ ├── transforms
│ │ ├── __init__.py
│ │ ├── data_augment_utils.py
│ │ ├── dbsampler.py
│ │ ├── formating.py
│ │ ├── loading.py
│ │ ├── test_time_aug.py
│ │ └── transforms_3d.py
│ ├── utils.py
│ └── waymo_dataset.py
├── engine
│ ├── __init__.py
│ └── hooks
│ │ ├── __init__.py
│ │ ├── benchmark_hook.py
│ │ ├── disable_object_sample_hook.py
│ │ └── visualization_hook.py
├── evaluation
│ ├── __init__.py
│ ├── functional
│ │ ├── __init__.py
│ │ ├── indoor_eval.py
│ │ ├── instance_seg_eval.py
│ │ ├── kitti_utils
│ │ │ ├── __init__.py
│ │ │ ├── eval.py
│ │ │ └── rotate_iou.py
│ │ ├── lyft_eval.py
│ │ ├── panoptic_seg_eval.py
│ │ ├── scannet_utils
│ │ │ ├── __init__.py
│ │ │ ├── evaluate_semantic_instance.py
│ │ │ └── util_3d.py
│ │ ├── seg_eval.py
│ │ └── waymo_utils
│ │ │ ├── __init__.py
│ │ │ └── prediction_to_waymo.py
│ └── metrics
│ │ ├── __init__.py
│ │ ├── indoor_metric.py
│ │ ├── instance_seg_metric.py
│ │ ├── kitti_metric.py
│ │ ├── lyft_metric.py
│ │ ├── nuscenes_metric.py
│ │ ├── panoptic_seg_metric.py
│ │ ├── seg_metric.py
│ │ └── waymo_metric.py
├── models
│ ├── __init__.py
│ ├── backbones
│ │ ├── __init__.py
│ │ ├── base_pointnet.py
│ │ ├── cylinder3d.py
│ │ ├── dgcnn.py
│ │ ├── dla.py
│ │ ├── mink_resnet.py
│ │ ├── minkunet_backbone.py
│ │ ├── multi_backbone.py
│ │ ├── nostem_regnet.py
│ │ ├── pointnet2_sa_msg.py
│ │ ├── pointnet2_sa_ssg.py
│ │ ├── second.py
│ │ └── spvcnn_backone.py
│ ├── data_preprocessors
│ │ ├── __init__.py
│ │ ├── data_preprocessor.py
│ │ ├── utils.py
│ │ └── voxelize.py
│ ├── decode_heads
│ │ ├── __init__.py
│ │ ├── cylinder3d_head.py
│ │ ├── decode_head.py
│ │ ├── dgcnn_head.py
│ │ ├── minkunet_head.py
│ │ ├── paconv_head.py
│ │ └── pointnet2_head.py
│ ├── dense_heads
│ │ ├── __init__.py
│ │ ├── anchor3d_head.py
│ │ ├── anchor_free_mono3d_head.py
│ │ ├── base_3d_dense_head.py
│ │ ├── base_conv_bbox_head.py
│ │ ├── base_mono3d_dense_head.py
│ │ ├── centerpoint_head.py
│ │ ├── fcaf3d_head.py
│ │ ├── fcos_mono3d_head.py
│ │ ├── free_anchor3d_head.py
│ │ ├── groupfree3d_head.py
│ │ ├── imvoxel_head.py
│ │ ├── monoflex_head.py
│ │ ├── parta2_rpn_head.py
│ │ ├── pgd_head.py
│ │ ├── point_rpn_head.py
│ │ ├── shape_aware_head.py
│ │ ├── smoke_mono3d_head.py
│ │ ├── ssd_3d_head.py
│ │ ├── train_mixins.py
│ │ └── vote_head.py
│ ├── detectors
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── centerpoint.py
│ │ ├── dfm.py
│ │ ├── dynamic_voxelnet.py
│ │ ├── fcos_mono3d.py
│ │ ├── groupfree3dnet.py
│ │ ├── h3dnet.py
│ │ ├── imvotenet.py
│ │ ├── imvoxelnet.py
│ │ ├── mink_single_stage.py
│ │ ├── multiview_dfm.py
│ │ ├── mvx_faster_rcnn.py
│ │ ├── mvx_two_stage.py
│ │ ├── parta2.py
│ │ ├── point_rcnn.py
│ │ ├── pv_rcnn.py
│ │ ├── sassd.py
│ │ ├── single_stage.py
│ │ ├── single_stage_mono3d.py
│ │ ├── smoke_mono3d.py
│ │ ├── ssd3dnet.py
│ │ ├── two_stage.py
│ │ ├── votenet.py
│ │ └── voxelnet.py
│ ├── layers
│ │ ├── __init__.py
│ │ ├── box3d_nms.py
│ │ ├── dgcnn_modules
│ │ │ ├── __init__.py
│ │ │ ├── dgcnn_fa_module.py
│ │ │ ├── dgcnn_fp_module.py
│ │ │ └── dgcnn_gf_module.py
│ │ ├── edge_fusion_module.py
│ │ ├── fusion_layers
│ │ │ ├── __init__.py
│ │ │ ├── coord_transform.py
│ │ │ ├── point_fusion.py
│ │ │ └── vote_fusion.py
│ │ ├── minkowski_engine_block.py
│ │ ├── mlp.py
│ │ ├── norm.py
│ │ ├── paconv
│ │ │ ├── __init__.py
│ │ │ ├── paconv.py
│ │ │ └── utils.py
│ │ ├── pointnet_modules
│ │ │ ├── __init__.py
│ │ │ ├── builder.py
│ │ │ ├── paconv_sa_module.py
│ │ │ ├── point_fp_module.py
│ │ │ ├── point_sa_module.py
│ │ │ └── stack_point_sa_module.py
│ │ ├── sparse_block.py
│ │ ├── spconv
│ │ │ ├── __init__.py
│ │ │ └── overwrite_spconv
│ │ │ │ ├── __init__.py
│ │ │ │ └── write_spconv2.py
│ │ ├── torchsparse
│ │ │ ├── __init__.py
│ │ │ └── torchsparse_wrapper.py
│ │ ├── torchsparse_block.py
│ │ ├── transformer.py
│ │ └── vote_module.py
│ ├── losses
│ │ ├── __init__.py
│ │ ├── axis_aligned_iou_loss.py
│ │ ├── chamfer_distance.py
│ │ ├── lovasz_loss.py
│ │ ├── multibin_loss.py
│ │ ├── paconv_regularization_loss.py
│ │ ├── rotated_iou_loss.py
│ │ └── uncertain_smooth_l1_loss.py
│ ├── middle_encoders
│ │ ├── __init__.py
│ │ ├── pillar_scatter.py
│ │ ├── sparse_encoder.py
│ │ ├── sparse_unet.py
│ │ └── voxel_set_abstraction.py
│ ├── necks
│ │ ├── __init__.py
│ │ ├── dla_neck.py
│ │ ├── imvoxel_neck.py
│ │ ├── pointnet2_fp_neck.py
│ │ └── second_fpn.py
│ ├── roi_heads
│ │ ├── __init__.py
│ │ ├── base_3droi_head.py
│ │ ├── bbox_heads
│ │ │ ├── __init__.py
│ │ │ ├── h3d_bbox_head.py
│ │ │ ├── parta2_bbox_head.py
│ │ │ ├── point_rcnn_bbox_head.py
│ │ │ └── pv_rcnn_bbox_head.py
│ │ ├── h3d_roi_head.py
│ │ ├── mask_heads
│ │ │ ├── __init__.py
│ │ │ ├── foreground_segmentation_head.py
│ │ │ ├── pointwise_semantic_head.py
│ │ │ └── primitive_head.py
│ │ ├── part_aggregation_roi_head.py
│ │ ├── point_rcnn_roi_head.py
│ │ ├── pv_rcnn_roi_head.py
│ │ └── roi_extractors
│ │ │ ├── __init__.py
│ │ │ ├── batch_roigridpoint_extractor.py
│ │ │ ├── single_roiaware_extractor.py
│ │ │ └── single_roipoint_extractor.py
│ ├── segmentors
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cylinder3d.py
│ │ ├── encoder_decoder.py
│ │ ├── minkunet.py
│ │ └── seg3d_tta.py
│ ├── task_modules
│ │ ├── __init__.py
│ │ ├── anchor
│ │ │ ├── __init__.py
│ │ │ ├── anchor_3d_generator.py
│ │ │ └── builder.py
│ │ ├── assigners
│ │ │ ├── __init__.py
│ │ │ └── max_3d_iou_assigner.py
│ │ ├── builder.py
│ │ ├── coders
│ │ │ ├── __init__.py
│ │ │ ├── anchor_free_bbox_coder.py
│ │ │ ├── centerpoint_bbox_coders.py
│ │ │ ├── delta_xyzwhlr_bbox_coder.py
│ │ │ ├── fcos3d_bbox_coder.py
│ │ │ ├── groupfree3d_bbox_coder.py
│ │ │ ├── monoflex_bbox_coder.py
│ │ │ ├── partial_bin_based_bbox_coder.py
│ │ │ ├── pgd_bbox_coder.py
│ │ │ ├── point_xyzwhlr_bbox_coder.py
│ │ │ └── smoke_bbox_coder.py
│ │ ├── samplers
│ │ │ ├── __init__.py
│ │ │ ├── iou_neg_piecewise_sampler.py
│ │ │ └── pseudosample.py
│ │ └── voxel
│ │ │ ├── __init__.py
│ │ │ └── voxel_generator.py
│ ├── test_time_augs
│ │ ├── __init__.py
│ │ └── merge_augs.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── add_prefix.py
│ │ ├── clip_sigmoid.py
│ │ ├── edge_indices.py
│ │ ├── gaussian.py
│ │ ├── gen_keypoints.py
│ │ └── handle_objs.py
│ └── voxel_encoders
│ │ ├── __init__.py
│ │ ├── pillar_encoder.py
│ │ ├── utils.py
│ │ └── voxel_encoder.py
├── registry.py
├── structures
│ ├── __init__.py
│ ├── bbox_3d
│ │ ├── __init__.py
│ │ ├── base_box3d.py
│ │ ├── box_3d_mode.py
│ │ ├── cam_box3d.py
│ │ ├── coord_3d_mode.py
│ │ ├── depth_box3d.py
│ │ ├── lidar_box3d.py
│ │ └── utils.py
│ ├── det3d_data_sample.py
│ ├── ops
│ │ ├── __init__.py
│ │ ├── box_np_ops.py
│ │ ├── iou3d_calculator.py
│ │ └── transforms.py
│ ├── point_data.py
│ └── points
│ │ ├── __init__.py
│ │ ├── base_points.py
│ │ ├── cam_points.py
│ │ ├── depth_points.py
│ │ └── lidar_points.py
├── testing
│ ├── __init__.py
│ ├── data_utils.py
│ └── model_utils.py
├── utils
│ ├── __init__.py
│ ├── array_converter.py
│ ├── collect_env.py
│ ├── compat_cfg.py
│ ├── misc.py
│ ├── setup_env.py
│ └── typing_utils.py
├── version.py
└── visualization
│ ├── __init__.py
│ ├── local_visualizer.py
│ └── vis_utils.py
├── model-index.yml
├── projects
├── BEVFusion
│ ├── README.md
│ ├── bevfusion
│ │ ├── __init__.py
│ │ ├── bevfusion.py
│ │ ├── bevfusion_necks.py
│ │ ├── depth_lss.py
│ │ ├── loading.py
│ │ ├── ops
│ │ │ ├── __init__.py
│ │ │ ├── bev_pool
│ │ │ │ ├── __init__.py
│ │ │ │ ├── bev_pool.py
│ │ │ │ └── src
│ │ │ │ │ ├── bev_pool.cpp
│ │ │ │ │ └── bev_pool_cuda.cu
│ │ │ └── voxel
│ │ │ │ ├── __init__.py
│ │ │ │ ├── scatter_points.py
│ │ │ │ ├── src
│ │ │ │ ├── scatter_points_cpu.cpp
│ │ │ │ ├── scatter_points_cuda.cu
│ │ │ │ ├── voxelization.cpp
│ │ │ │ ├── voxelization.h
│ │ │ │ ├── voxelization_cpu.cpp
│ │ │ │ └── voxelization_cuda.cu
│ │ │ │ └── voxelize.py
│ │ ├── sparse_encoder.py
│ │ ├── transformer.py
│ │ ├── transforms_3d.py
│ │ ├── transfusion_head.py
│ │ └── utils.py
│ ├── configs
│ │ ├── bevfusion_lidar-cam_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py
│ │ └── bevfusion_lidar_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py
│ ├── demo
│ │ └── multi_modality_demo.py
│ └── setup.py
├── CENet
│ ├── README.md
│ ├── cenet
│ │ ├── __init__.py
│ │ ├── boundary_loss.py
│ │ ├── cenet_backbone.py
│ │ ├── range_image_head.py
│ │ ├── range_image_segmentor.py
│ │ ├── transforms_3d.py
│ │ └── utils.py
│ └── configs
│ │ ├── cenet-64x1024_4xb4_semantickitti.py
│ │ ├── cenet-64x2048_4xb4_semantickitti.py
│ │ └── cenet-64x512_4xb4_semantickitti.py
├── CenterFormer
│ ├── README.md
│ ├── centerformer
│ │ ├── __init__.py
│ │ ├── bbox_ops.py
│ │ ├── centerformer.py
│ │ ├── centerformer_backbone.py
│ │ ├── centerformer_head.py
│ │ ├── losses.py
│ │ ├── multi_scale_deform_attn.py
│ │ └── transformer.py
│ └── configs
│ │ └── centerformer_voxel01_second-attn_secfpn-attn_4xb4-cyclic-20e_waymoD5-3d-3class.py
├── DETR3D
│ ├── README.md
│ ├── configs
│ │ ├── detr3d_r101_gridmask.py
│ │ ├── detr3d_r101_gridmask_cbgs.py
│ │ └── detr3d_vovnet_gridmask_trainval_cbgs.py
│ ├── detr3d
│ │ ├── __init__.py
│ │ ├── detr3d.py
│ │ ├── detr3d_head.py
│ │ ├── detr3d_transformer.py
│ │ ├── grid_mask.py
│ │ ├── hungarian_assigner_3d.py
│ │ ├── match_cost.py
│ │ ├── nms_free_coder.py
│ │ ├── util.py
│ │ └── vovnet.py
│ └── old_detr3d_converter.py
├── DSVT
│ ├── README.md
│ ├── configs
│ │ └── dsvt_voxel032_res-second_secfpn_8xb1-cyclic-12e_waymoD5-3d-3class.py
│ ├── dsvt
│ │ ├── __init__.py
│ │ ├── disable_aug_hook.py
│ │ ├── dsvt.py
│ │ ├── dsvt_head.py
│ │ ├── dsvt_input_layer.py
│ │ ├── dsvt_transformer.py
│ │ ├── dynamic_pillar_vfe.py
│ │ ├── map2bev.py
│ │ ├── ops
│ │ │ └── ingroup_inds
│ │ │ │ ├── ingroup_inds_op.py
│ │ │ │ └── src
│ │ │ │ ├── error.cuh
│ │ │ │ ├── ingroup_inds.cpp
│ │ │ │ └── ingroup_inds_kernel.cu
│ │ ├── res_second.py
│ │ ├── transforms_3d.py
│ │ └── utils.py
│ └── setup.py
├── NeRF-Det
│ ├── README.md
│ ├── configs
│ │ ├── nerfdet_res101_2x_low_res_depth.py
│ │ ├── nerfdet_res50_2x_low_res.py
│ │ └── nerfdet_res50_2x_low_res_depth.py
│ ├── nerfdet
│ │ ├── __init__.py
│ │ ├── data_preprocessor.py
│ │ ├── formating.py
│ │ ├── multiview_pipeline.py
│ │ ├── nerf_det3d_data_sample.py
│ │ ├── nerf_utils
│ │ │ ├── nerf_mlp.py
│ │ │ ├── projection.py
│ │ │ ├── render_ray.py
│ │ │ └── save_rendered_img.py
│ │ ├── nerfdet.py
│ │ ├── nerfdet_head.py
│ │ └── scannet_multiview_dataset.py
│ └── prepare_infos.py
├── PETR
│ ├── README.md
│ ├── configs
│ │ └── petr_vovnet_gridmask_p4_800x320.py
│ └── petr
│ │ ├── __init__.py
│ │ ├── cp_fpn.py
│ │ ├── grid_mask.py
│ │ ├── hungarian_assigner_3d.py
│ │ ├── match_cost.py
│ │ ├── nms_free_coder.py
│ │ ├── petr.py
│ │ ├── petr_head.py
│ │ ├── petr_transformer.py
│ │ ├── positional_encoding.py
│ │ ├── transforms_3d.py
│ │ ├── utils.py
│ │ └── vovnetcp.py
├── TPVFormer
│ ├── README.md
│ ├── configs
│ │ └── tpvformer_8xb1-2x_nus-seg.py
│ └── tpvformer
│ │ ├── __init__.py
│ │ ├── cross_view_hybrid_attention.py
│ │ ├── data_preprocessor.py
│ │ ├── image_cross_attention.py
│ │ ├── loading.py
│ │ ├── nuscenes_dataset.py
│ │ ├── positional_encoding.py
│ │ ├── tpvformer.py
│ │ ├── tpvformer_encoder.py
│ │ ├── tpvformer_head.py
│ │ └── tpvformer_layer.py
├── TR3D
│ ├── README.md
│ ├── configs
│ │ ├── tr3d.py
│ │ ├── tr3d_1xb16_s3dis-3d-5class.py
│ │ ├── tr3d_1xb16_scannet-3d-18class.py
│ │ └── tr3d_1xb16_sunrgbd-3d-10class.py
│ └── tr3d
│ │ ├── __init__.py
│ │ ├── axis_aligned_iou_loss.py
│ │ ├── mink_resnet.py
│ │ ├── rotated_iou_loss.py
│ │ ├── tr3d_head.py
│ │ ├── tr3d_neck.py
│ │ └── transforms_3d.py
└── example_project
│ ├── README.md
│ ├── configs
│ └── fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py
│ └── dummy
│ ├── __init__.py
│ └── dummy_resnet.py
├── requirements.txt
├── requirements
├── build.txt
├── docs.txt
├── mminstall.txt
├── optional.txt
├── readthedocs.txt
├── runtime.txt
└── tests.txt
├── resources
├── browse_dataset_mono.png
├── browse_dataset_multi_modality.png
├── browse_dataset_seg.png
├── coord_sys_all.png
├── data_pipeline.png
├── loss_curve.png
├── mmdet3d-logo.png
├── mmdet3d_outdoor_demo.gif
├── nuimages_demo.gif
├── open3d_visual.gif
└── open3d_visual.png
├── setup.cfg
├── setup.py
├── tests
├── data
│ ├── kitti
│ │ ├── a.bin
│ │ ├── kitti_dbinfos_train.pkl
│ │ ├── kitti_gt_database
│ │ │ └── 0_Pedestrian_0.bin
│ │ ├── kitti_infos_mono3d.coco.json
│ │ ├── kitti_infos_mono3d.pkl
│ │ ├── kitti_infos_train.pkl
│ │ ├── mono3d_sample_results.pkl
│ │ ├── mono3d_sample_results2d.pkl
│ │ └── training
│ │ │ ├── calib
│ │ │ └── 000000.pkl
│ │ │ ├── image_2
│ │ │ ├── 000000.png
│ │ │ └── 000007.png
│ │ │ ├── velodyne
│ │ │ └── 000000.bin
│ │ │ └── velodyne_reduced
│ │ │ └── 000000.bin
│ ├── lyft
│ │ ├── lidar
│ │ │ ├── host-a017_lidar1_1236118886501000046.bin
│ │ │ ├── host-a017_lidar1_1236118886701083686.bin
│ │ │ └── host-a017_lidar1_1236118886901125926.bin
│ │ ├── lyft_infos.pkl
│ │ ├── lyft_infos_val.pkl
│ │ ├── sample_results.pkl
│ │ ├── v1.01-train
│ │ │ ├── maps
│ │ │ │ └── map_raster_palo_alto.png
│ │ │ └── v1.01-train
│ │ │ │ ├── attribute.json
│ │ │ │ ├── calibrated_sensor.json
│ │ │ │ ├── category.json
│ │ │ │ ├── ego_pose.json
│ │ │ │ ├── instance.json
│ │ │ │ ├── log.json
│ │ │ │ ├── map.json
│ │ │ │ ├── sample.json
│ │ │ │ ├── sample_annotation.json
│ │ │ │ ├── sample_data.json
│ │ │ │ ├── scene.json
│ │ │ │ ├── sensor.json
│ │ │ │ └── visibility.json
│ │ └── val.txt
│ ├── nuscenes
│ │ ├── mono3d_sample_results.pkl
│ │ ├── nus_info.pkl
│ │ ├── nus_infos_mono3d.coco.json
│ │ ├── samples
│ │ │ ├── CAM_BACK_LEFT
│ │ │ │ └── n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg
│ │ │ └── LIDAR_TOP
│ │ │ │ └── n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470948018.pcd.bin
│ │ └── sweeps
│ │ │ └── LIDAR_TOP
│ │ │ ├── n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin
│ │ │ └── n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470898274.pcd.bin
│ ├── ops
│ │ ├── features_for_fps_distance.npy
│ │ └── fps_idx.npy
│ ├── s3dis
│ │ ├── instance_mask
│ │ │ └── Area_1_office_2.bin
│ │ ├── points
│ │ │ └── Area_1_office_2.bin
│ │ ├── s3dis_infos.pkl
│ │ └── semantic_mask
│ │ │ └── Area_1_office_2.bin
│ ├── scannet
│ │ ├── instance_mask
│ │ │ └── scene0000_00.bin
│ │ ├── points
│ │ │ └── scene0000_00.bin
│ │ ├── scannet_infos.pkl
│ │ └── semantic_mask
│ │ │ └── scene0000_00.bin
│ ├── semantickitti
│ │ ├── semantickitti_infos.pkl
│ │ └── sequences
│ │ │ └── 00
│ │ │ ├── labels
│ │ │ └── 000000.label
│ │ │ └── velodyne
│ │ │ └── 000000.bin
│ ├── sunrgbd
│ │ ├── points
│ │ │ └── 000001.bin
│ │ ├── sunrgbd_infos.pkl
│ │ └── sunrgbd_trainval
│ │ │ └── image
│ │ │ └── 000001.jpg
│ └── waymo
│ │ ├── kitti_format
│ │ ├── training
│ │ │ ├── image_0
│ │ │ │ ├── 0000000.png
│ │ │ │ └── 1000000.png
│ │ │ └── velodyne
│ │ │ │ ├── 0000000.bin
│ │ │ │ └── 1000000.bin
│ │ ├── waymo_dbinfos_train.pkl
│ │ ├── waymo_gt_database
│ │ │ └── 0_Car_0.bin
│ │ ├── waymo_infos_train.pkl
│ │ └── waymo_infos_val.pkl
│ │ └── waymo_format
│ │ ├── gt.bin
│ │ └── validation
│ │ └── val.tfrecord
├── test_apis
│ └── test_inferencers
│ │ ├── test_lidar_det3d_inferencer.py
│ │ ├── test_lidar_seg3d_inferencer.py
│ │ ├── test_mono_det3d_inferencer.py
│ │ └── test_multi_modality_det3d_inferencer.py
├── test_datasets
│ ├── test_dataset_wrappers.py
│ ├── test_kitti_dataset.py
│ ├── test_lyft_dataset.py
│ ├── test_nuscenes_dataset.py
│ ├── test_s3dis_dataset.py
│ ├── test_scannet_dataset.py
│ ├── test_semantickitti_dataset.py
│ ├── test_sunrgbd_dataset.py
│ ├── test_transforms
│ │ ├── test_formating.py
│ │ ├── test_loading.py
│ │ ├── test_transforms_3d.py
│ │ └── utils.py
│ ├── test_tta.py
│ └── test_waymo_dataset.py
├── test_engine
│ └── test_hooks
│ │ ├── test_disable_object_sample_hook.py
│ │ └── test_visualization_hook.py
├── test_evaluation
│ ├── test_functional
│ │ ├── test_instance_seg_eval.py
│ │ ├── test_kitti_eval.py
│ │ ├── test_panoptic_seg_eval.py
│ │ └── test_seg_eval.py
│ └── test_metrics
│ │ ├── test_indoor_metric.py
│ │ ├── test_instance_seg_metric.py
│ │ ├── test_kitti_metric.py
│ │ ├── test_panoptic_seg_metric.py
│ │ └── test_seg_metric.py
├── test_models
│ ├── test_backbones
│ │ ├── test_cylinder3d_backbone.py
│ │ ├── test_dgcnn.py
│ │ ├── test_dla.py
│ │ ├── test_mink_resnet.py
│ │ ├── test_minkunet_backbone.py
│ │ ├── test_multi_backbone.py
│ │ ├── test_pointnet2_sa_msg.py
│ │ ├── test_pointnet2_sa_ssg.py
│ │ └── test_spvcnn_backbone.py
│ ├── test_data_preprocessors
│ │ └── test_data_preprocessor.py
│ ├── test_decode_heads
│ │ ├── test_cylinder3d_head.py
│ │ ├── test_dgcnn_head.py
│ │ ├── test_minkunet_head.py
│ │ ├── test_paconv_head.py
│ │ └── test_pointnet2_head.py
│ ├── test_dense_heads
│ │ ├── test_anchor3d_head.py
│ │ ├── test_fcaf3d_head.py
│ │ ├── test_fcos_mono3d_head.py
│ │ ├── test_freeanchors.py
│ │ ├── test_imvoxel_head.py
│ │ ├── test_monoflex_head.py
│ │ ├── test_pgd_head.py
│ │ ├── test_smoke_mono3d_head.py
│ │ └── test_ssn.py
│ ├── test_detectors
│ │ ├── test_3dssd.py
│ │ ├── test_center_point.py
│ │ ├── test_fcaf3d.py
│ │ ├── test_groupfree3d.py
│ │ ├── test_h3dnet.py
│ │ ├── test_imvotenet.py
│ │ ├── test_imvoxelnet.py
│ │ ├── test_mvxnet.py
│ │ ├── test_parta2.py
│ │ ├── test_pointrcnn.py
│ │ ├── test_pvrcnn.py
│ │ ├── test_sassd.py
│ │ ├── test_votenet.py
│ │ └── test_voxelnet.py
│ ├── test_layers
│ │ ├── test_box3d_nms.py
│ │ ├── test_dgcnn_modules
│ │ │ ├── test_dgcnn_fa_module.py
│ │ │ ├── test_dgcnn_fp_module.py
│ │ │ └── test_dgcnn_gf_module.py
│ │ ├── test_fusion_layers
│ │ │ ├── test_fusion_coord_trans.py
│ │ │ ├── test_point_fusion.py
│ │ │ └── test_vote_fusion.py
│ │ ├── test_minkowski_engine
│ │ │ └── test_minkowski_engine_module.py
│ │ ├── test_paconv
│ │ │ ├── test_paconv_modules.py
│ │ │ └── test_paconv_ops.py
│ │ ├── test_pointnet_modules
│ │ │ ├── test_point_fp_module.py
│ │ │ └── test_point_sa_module.py
│ │ ├── test_spconv
│ │ │ └── test_spconv_module.py
│ │ ├── test_torchsparse
│ │ │ └── test_torchsparse_module.py
│ │ └── test_vote_module.py
│ ├── test_losses
│ │ ├── test_chamfer_disrance.py
│ │ ├── test_multibin_loss.py
│ │ ├── test_paconv_regularization_loss.py
│ │ ├── test_rotated_iou_loss.py
│ │ └── test_uncertain_smooth_l1_loss.py
│ ├── test_middle_encoders
│ │ ├── test_sparse_encoders.py
│ │ └── test_sparse_unet.py
│ ├── test_necks
│ │ ├── test_dla_neck.py
│ │ ├── test_imvoxel_neck.py
│ │ ├── test_pointnet2_fp_neck.py
│ │ └── test_second_fpn.py
│ ├── test_segmentors
│ │ ├── test_cylinder3d.py
│ │ ├── test_minkunet.py
│ │ └── test_seg3d_tta_model.py
│ ├── test_task_modules
│ │ ├── test_anchor
│ │ │ └── test_anchor_3d_generator.py
│ │ ├── test_coders
│ │ │ ├── test_anchor_free_box_coder.py
│ │ │ ├── test_centerpoint_bbox_coder.py
│ │ │ ├── test_fcos3d_bbox_coder.py
│ │ │ ├── test_monoflex_bbox_coder.py
│ │ │ ├── test_partial_bin_based_box_coder.py
│ │ │ ├── test_pgd_bbox_coder.py
│ │ │ ├── test_point_xyzwhlr_bbox_coder.py
│ │ │ └── test_smoke_bbox_coder.py
│ │ ├── test_samplers
│ │ │ └── test_iou_piecewise_sampler.py
│ │ └── test_voxel
│ │ │ └── test_voxel_generator.py
│ ├── test_utils
│ │ └── test_utils.py
│ └── test_voxel_encoders
│ │ ├── test_pillar_encoder.py
│ │ └── test_voxel_encoders.py
├── test_samples
│ └── parta2_roihead_inputs.npz
├── test_structures
│ ├── test_bbox
│ │ ├── test_box3d.py
│ │ └── test_coord_3d_mode.py
│ ├── test_det3d_data_sample.py
│ ├── test_ops
│ │ └── test_box_np_ops.py
│ ├── test_point_data.py
│ └── test_points
│ │ ├── test_base_points.py
│ │ ├── test_cam_points.py
│ │ └── test_depth_points.py
└── test_utils
│ ├── test_compat_cfg.py
│ └── test_setup_env.py
└── tools
├── analysis_tools
├── analyze_logs.py
├── benchmark.py
└── get_flops.py
├── create_data.py
├── create_data.sh
├── dataset_converters
├── create_gt_database.py
├── indoor_converter.py
├── kitti_converter.py
├── kitti_data_utils.py
├── kitti_unzip.sh
├── lyft_converter.py
├── lyft_data_fixer.py
├── nuimage_converter.py
├── nuscenes_converter.py
├── nuscenes_unzip.sh
├── s3dis_data_utils.py
├── scannet_data_utils.py
├── semantickitti_converter.py
├── semantickitti_unzip.sh
├── sunrgbd_data_utils.py
├── update_infos_to_v2.py
└── waymo_converter.py
├── deployment
├── mmdet3d2torchserve.py
├── mmdet3d_handler.py
└── test_torchserver.py
├── dist_test.sh
├── dist_train.sh
├── misc
├── browse_dataset.py
├── fuse_conv_bn.py
├── print_config.py
└── visualize_results.py
├── model_converters
├── convert_h3dnet_checkpoints.py
├── convert_votenet_checkpoints.py
├── publish_model.py
└── regnet2mmdet.py
├── slurm_test.sh
├── slurm_train.sh
├── test.py
├── train.py
├── update_data_coords.py
└── update_data_coords.sh
/.circleci/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTORCH="1.8.1"
2 | ARG CUDA="10.2"
3 | ARG CUDNN="7"
4 |
5 | ARG DEBIAN_FRONTEND=noninteractive
6 |
7 | FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
8 |
9 | # To fix GPG key error when running apt-get update
10 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
11 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
12 |
13 | RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
14 |
--------------------------------------------------------------------------------
/.dev_scripts/benchmark_options.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | third_part_libs = [
4 | 'conda install openblas-devel -c anaconda',
5 | "pip install -U git+https://github.com/NVIDIA/MinkowskiEngine -v --no-deps --install-option='--blas_include_dirs=/opt/conda/include' --install-option='--blas=openblas'" # noqa
6 | ]
7 | default_floating_range = 0.5
8 | model_floating_ranges = {
9 | 'configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py': # noqa
10 | 0.7
11 | }
12 |
--------------------------------------------------------------------------------
/.dev_scripts/benchmark_train_models.txt:
--------------------------------------------------------------------------------
1 | configs/3dssd/3dssd_4xb4_kitti-3d-car.py
2 | configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py
3 | configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py
4 | configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py
5 | configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py
6 | configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py
7 | configs/pointnet2/pointnet2_msg_2xb16-cosine-250e_scannet-seg.py
8 | configs/pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-3class.py
9 | configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py
10 | configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py
11 | configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py
12 | configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py
13 | configs/votenet/votenet_8xb8_scannet-3d.py
14 |
--------------------------------------------------------------------------------
/.dev_scripts/covignore.cfg:
--------------------------------------------------------------------------------
1 | # Each line should be the relative path to the root directory
2 | # of this repo. Support regular expression as well.
3 | # For example:
4 | # .*/utils.py
5 |
6 | .*/__init__.py
7 |
--------------------------------------------------------------------------------
/.dev_scripts/linter.sh:
--------------------------------------------------------------------------------
1 | yapf -r -i mmdet3d/ configs/ tests/ tools/
2 | isort mmdet3d/ configs/ tests/ tools/
3 | flake8 .
4 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | We appreciate all contributions to improve MMDetection3D. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
3 | contact_links:
4 | - name: Common Issues
5 | url: https://mmdetection3d.readthedocs.io/en/latest/notes/faq.html
6 | about: Check if your issue already has solutions
7 | - name: MMDet3D Documentation
8 | url: https://mmdetection3d.readthedocs.io/en/latest/
9 | about: Check if your question is answered in docs
10 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 |
3 | on: push
4 |
5 | concurrency:
6 | group: ${{ github.workflow }}-${{ github.ref }}
7 | cancel-in-progress: true
8 |
9 | jobs:
10 | build-n-publish:
11 | runs-on: ubuntu-latest
12 | if: startsWith(github.event.ref, 'refs/tags')
13 | steps:
14 | - uses: actions/checkout@v2
15 | - name: Set up Python 3.7
16 | uses: actions/setup-python@v2
17 | with:
18 | python-version: 3.7
19 | - name: Install torch
20 | run: pip install torch
21 | - name: Install wheel
22 | run: pip install wheel
23 | - name: Build MMDet3D
24 | run: python setup.py sdist bdist_wheel
25 | - name: Publish distribution to PyPI
26 | run: |
27 | pip install twine
28 | twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
29 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 |
3 | on: [push, pull_request]
4 |
5 | concurrency:
6 | group: ${{ github.workflow }}-${{ github.ref }}
7 | cancel-in-progress: true
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v2
14 | - name: Set up Python 3.7
15 | uses: actions/setup-python@v2
16 | with:
17 | python-version: 3.7
18 | - name: Install pre-commit hook
19 | run: |
20 | pip install pre-commit
21 | pre-commit install
22 | - name: Linting
23 | run: pre-commit run --all-files
24 | - name: Check docstring coverage
25 | run: |
26 | pip install interrogate
27 | interrogate -v --ignore-init-method --ignore-magic --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 mmdet3d
28 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: ubuntu-22.04
5 | tools:
6 | python: "3.8"
7 |
8 | formats:
9 | - epub
10 |
11 | python:
12 | install:
13 | - requirements: requirements/docs.txt
14 | - requirements: requirements/readthedocs.txt
15 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - name: "MMDetection3D Contributors"
5 | title: "OpenMMLab's Next-generation Platform for General 3D Object Detection"
6 | date-released: 2020-07-23
7 | url: "https://github.com/open-mmlab/mmdetection3d"
8 | license: Apache-2.0
9 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include mmdet3d/.mim/model-index.yml
2 | include mmdet3d/.mim/dataset-index.yml
3 | include requirements/*.txt
4 | recursive-include mmdet3d/.mim/ops *.cpp *.cu *.h *.cc
5 | recursive-include mmdet3d/.mim/configs *.py *.yml
6 | recursive-include mmdet3d/.mim/tools *.sh *.py
7 |
--------------------------------------------------------------------------------
/configs/3dssd/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: 3DSSD
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 4x TITAN X
8 | Architecture:
9 | - PointNet++
10 | Paper:
11 | URL: https://arxiv.org/abs/2002.10187
12 | Title: '3DSSD: Point-based 3D Single Stage Object Detector'
13 | README: configs/3dssd/README.md
14 | Code:
15 | URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/ssd3dnet.py#L7
16 | Version: v0.6.0
17 |
18 | Models:
19 | - Name: 3dssd_4x4_kitti-3d-car
20 | In Collection: 3DSSD
21 | Config: configs/3dssd/3dssd_4xb4_kitti-3d-car.py
22 | Metadata:
23 | Training Memory (GB): 4.7
24 | Results:
25 | - Task: 3D Object Detection
26 | Dataset: KITTI
27 | Metrics:
28 | mAP: 78.58
29 | Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/3dssd/3dssd_4x4_kitti-3d-car/3dssd_4x4_kitti-3d-car_20210818_203828-b89c8fc4.pth
30 |
--------------------------------------------------------------------------------
/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | default_scope = 'mmdet3d'
2 |
3 | default_hooks = dict(
4 | timer=dict(type='IterTimerHook'),
5 | logger=dict(type='LoggerHook', interval=50),
6 | param_scheduler=dict(type='ParamSchedulerHook'),
7 | checkpoint=dict(type='CheckpointHook', interval=-1),
8 | sampler_seed=dict(type='DistSamplerSeedHook'),
9 | visualization=dict(type='Det3DVisualizationHook'))
10 |
11 | env_cfg = dict(
12 | cudnn_benchmark=False,
13 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
14 | dist_cfg=dict(backend='nccl'),
15 | )
16 |
17 | log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
18 |
19 | log_level = 'INFO'
20 | load_from = None
21 | resume = False
22 |
23 | # TODO: support auto scaling lr
24 |
--------------------------------------------------------------------------------
/configs/_base_/models/dgcnn.py:
--------------------------------------------------------------------------------
1 | # model settings
2 | model = dict(
3 | type='EncoderDecoder3D',
4 | data_preprocessor=dict(type='Det3DDataPreprocessor'),
5 | backbone=dict(
6 | type='DGCNNBackbone',
7 | in_channels=9, # [xyz, rgb, normal_xyz], modified with dataset
8 | num_samples=(20, 20, 20),
9 | knn_modes=('D-KNN', 'F-KNN', 'F-KNN'),
10 | radius=(None, None, None),
11 | gf_channels=((64, 64), (64, 64), (64, )),
12 | fa_channels=(1024, ),
13 | act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
14 | decode_head=dict(
15 | type='DGCNNHead',
16 | fp_channels=(1216, 512),
17 | channels=256,
18 | dropout_ratio=0.5,
19 | conv_cfg=dict(type='Conv1d'),
20 | norm_cfg=dict(type='BN1d'),
21 | act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
22 | loss_decode=dict(
23 | type='mmdet.CrossEntropyLoss',
24 | use_sigmoid=False,
25 | class_weight=None, # modified with dataset
26 | loss_weight=1.0)),
27 | # model training and testing settings
28 | train_cfg=dict(),
29 | test_cfg=dict(mode='slide'))
30 |
--------------------------------------------------------------------------------
/configs/_base_/models/fcaf3d.py:
--------------------------------------------------------------------------------
1 | model = dict(
2 | type='MinkSingleStage3DDetector',
3 | data_preprocessor=dict(type='Det3DDataPreprocessor'),
4 | backbone=dict(type='MinkResNet', in_channels=3, depth=34),
5 | bbox_head=dict(
6 | type='FCAF3DHead',
7 | in_channels=(64, 128, 256, 512),
8 | out_channels=128,
9 | voxel_size=.01,
10 | pts_prune_threshold=100000,
11 | pts_assign_threshold=27,
12 | pts_center_threshold=18,
13 | num_classes=18,
14 | num_reg_outs=6,
15 | center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True),
16 | bbox_loss=dict(type='AxisAlignedIoULoss'),
17 | cls_loss=dict(type='mmdet.FocalLoss'),
18 | ),
19 | train_cfg=dict(),
20 | test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01))
21 |
--------------------------------------------------------------------------------
/configs/_base_/models/minkunet.py:
--------------------------------------------------------------------------------
1 | model = dict(
2 | type='MinkUNet',
3 | data_preprocessor=dict(
4 | type='Det3DDataPreprocessor',
5 | voxel=True,
6 | voxel_type='minkunet',
7 | batch_first=False,
8 | max_voxels=80000,
9 | voxel_layer=dict(
10 | max_num_points=-1,
11 | point_cloud_range=[-100, -100, -20, 100, 100, 20],
12 | voxel_size=[0.05, 0.05, 0.05],
13 | max_voxels=(-1, -1))),
14 | backbone=dict(
15 | type='MinkUNetBackbone',
16 | in_channels=4,
17 | num_stages=4,
18 | base_channels=32,
19 | encoder_channels=[32, 64, 128, 256],
20 | encoder_blocks=[2, 2, 2, 2],
21 | decoder_channels=[256, 128, 96, 96],
22 | decoder_blocks=[2, 2, 2, 2],
23 | block_type='basic',
24 | sparseconv_backend='torchsparse'),
25 | decode_head=dict(
26 | type='MinkUNetHead',
27 | channels=96,
28 | num_classes=19,
29 | dropout_ratio=0,
30 | loss_decode=dict(type='mmdet.CrossEntropyLoss', avg_non_ignore=True),
31 | ignore_index=19),
32 | train_cfg=dict(),
33 | test_cfg=dict())
34 |
--------------------------------------------------------------------------------
/configs/_base_/models/paconv_ssg-cuda.py:
--------------------------------------------------------------------------------
1 | _base_ = './paconv_ssg.py'
2 |
3 | model = dict(
4 | backbone=dict(
5 | sa_cfg=dict(
6 | type='PAConvCUDASAModule',
7 | scorenet_cfg=dict(mlp_channels=[8, 16, 16]))))
8 |
--------------------------------------------------------------------------------
/configs/_base_/models/pointpillars_hv_fpn_lyft.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_nus.py'
2 |
3 | # model settings (based on nuScenes model settings)
4 | # Voxel size for voxel encoder
5 | # Usually voxel size is changed consistently with the point cloud range
6 | # If point cloud range is modified, do remember to change all related
7 | # keys in the config.
8 | model = dict(
9 | data_preprocessor=dict(
10 | voxel_layer=dict(
11 | max_num_points=20,
12 | point_cloud_range=[-80, -80, -5, 80, 80, 3],
13 | max_voxels=(60000, 60000))),
14 | pts_voxel_encoder=dict(
15 | feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]),
16 | pts_middle_encoder=dict(output_shape=[640, 640]),
17 | pts_bbox_head=dict(
18 | num_classes=9,
19 | anchor_generator=dict(
20 | ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[]),
21 | bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)),
22 | # model training settings (based on nuScenes model settings)
23 | train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])))
24 |
--------------------------------------------------------------------------------
/configs/_base_/models/pointpillars_hv_fpn_range100_lyft.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_nus.py'
2 |
3 | # model settings (based on nuScenes model settings)
4 | # Voxel size for voxel encoder
5 | # Usually voxel size is changed consistently with the point cloud range
6 | # If point cloud range is modified, do remember to change all related
7 | # keys in the config.
8 | model = dict(
9 | data_preprocessor=dict(
10 | voxel_layer=dict(
11 | max_num_points=20,
12 | point_cloud_range=[-100, -100, -5, 100, 100, 3],
13 | max_voxels=(60000, 60000))),
14 | pts_voxel_encoder=dict(
15 | feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]),
16 | pts_middle_encoder=dict(output_shape=[800, 800]),
17 | pts_bbox_head=dict(
18 | num_classes=9,
19 | anchor_generator=dict(
20 | ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]),
21 | bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)),
22 | # model training settings (based on nuScenes model settings)
23 | train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])))
24 |
--------------------------------------------------------------------------------
/configs/_base_/models/spvcnn.py:
--------------------------------------------------------------------------------
1 | model = dict(
2 | type='MinkUNet',
3 | data_preprocessor=dict(
4 | type='Det3DDataPreprocessor',
5 | voxel=True,
6 | voxel_type='minkunet',
7 | batch_first=False,
8 | max_voxels=80000,
9 | voxel_layer=dict(
10 | max_num_points=-1,
11 | point_cloud_range=[-100, -100, -20, 100, 100, 20],
12 | voxel_size=[0.05, 0.05, 0.05],
13 | max_voxels=(-1, -1))),
14 | backbone=dict(
15 | type='SPVCNNBackbone',
16 | in_channels=4,
17 | num_stages=4,
18 | base_channels=32,
19 | encoder_channels=[32, 64, 128, 256],
20 | encoder_blocks=[2, 2, 2, 2],
21 | decoder_channels=[256, 128, 96, 96],
22 | decoder_blocks=[2, 2, 2, 2],
23 | block_type='basic',
24 | sparseconv_backend='torchsparse',
25 | drop_ratio=0.3),
26 | decode_head=dict(
27 | type='MinkUNetHead',
28 | channels=96,
29 | num_classes=19,
30 | dropout_ratio=0,
31 | loss_decode=dict(type='mmdet.CrossEntropyLoss', avg_non_ignore=True),
32 | ignore_index=19),
33 | train_cfg=dict(),
34 | test_cfg=dict())
35 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/cosine.py:
--------------------------------------------------------------------------------
1 | # This schedule is mainly used by models with dynamic voxelization
2 | # optimizer
3 | lr = 0.003 # max learning rate
4 | optim_wrapper = dict(
5 | type='OptimWrapper',
6 | optimizer=dict(
7 | type='AdamW', lr=lr, weight_decay=0.001, betas=(0.95, 0.99)),
8 | clip_grad=dict(max_norm=10, norm_type=2),
9 | )
10 |
11 | param_scheduler = [
12 | dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
13 | dict(
14 | type='CosineAnnealingLR',
15 | begin=0,
16 | T_max=40,
17 | end=40,
18 | by_epoch=True,
19 | eta_min=1e-5)
20 | ]
21 | # training schedule for 1x
22 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1)
23 | val_cfg = dict(type='ValLoop')
24 | test_cfg = dict(type='TestLoop')
25 |
26 | # Default setting for scaling LR automatically
27 | # - `enable` means enable scaling LR automatically
28 | # or not by default.
29 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
30 | auto_scale_lr = dict(enable=False, base_batch_size=16)
31 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/mmdet-schedule-1x.py:
--------------------------------------------------------------------------------
1 | # training schedule for 1x
2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
3 | val_cfg = dict(type='ValLoop')
4 | test_cfg = dict(type='TestLoop')
5 |
6 | # learning rate
7 | param_scheduler = [
8 | dict(
9 | type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
10 | dict(
11 | type='MultiStepLR',
12 | begin=0,
13 | end=12,
14 | by_epoch=True,
15 | milestones=[8, 11],
16 | gamma=0.1)
17 | ]
18 |
19 | # optimizer
20 | optim_wrapper = dict(
21 | type='OptimWrapper',
22 | optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
23 |
24 | # Default setting for scaling LR automatically
25 | # - `enable` means enable scaling LR automatically
26 | # or not by default.
27 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
28 | auto_scale_lr = dict(enable=False, base_batch_size=16)
29 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule-2x.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used by models on nuScenes dataset
3 | lr = 0.001
4 | optim_wrapper = dict(
5 | type='OptimWrapper',
6 | optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01),
7 | # max_norm=10 is better for SECOND
8 | clip_grad=dict(max_norm=35, norm_type=2))
9 |
10 | # training schedule for 2x
11 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=24)
12 | val_cfg = dict(type='ValLoop')
13 | test_cfg = dict(type='TestLoop')
14 |
15 | # learning rate
16 | param_scheduler = [
17 | dict(
18 | type='LinearLR',
19 | start_factor=1.0 / 1000,
20 | by_epoch=False,
21 | begin=0,
22 | end=1000),
23 | dict(
24 | type='MultiStepLR',
25 | begin=0,
26 | end=24,
27 | by_epoch=True,
28 | milestones=[20, 23],
29 | gamma=0.1)
30 | ]
31 |
32 | # Default setting for scaling LR automatically
33 | # - `enable` means enable scaling LR automatically
34 | # or not by default.
35 | # - `base_batch_size` = (8 GPUs) x (4 samples per GPU).
36 | auto_scale_lr = dict(enable=False, base_batch_size=32)
37 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/schedule-3x.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used by models on indoor dataset,
3 | # e.g., VoteNet on SUNRGBD and ScanNet
4 | lr = 0.008 # max learning rate
5 | optim_wrapper = dict(
6 | type='OptimWrapper',
7 | optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01),
8 | clip_grad=dict(max_norm=10, norm_type=2),
9 | )
10 |
11 | # training schedule for 3x
12 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1)
13 | val_cfg = dict(type='ValLoop')
14 | test_cfg = dict(type='TestLoop')
15 |
16 | # learning rate
17 | param_scheduler = [
18 | dict(
19 | type='MultiStepLR',
20 | begin=0,
21 | end=36,
22 | by_epoch=True,
23 | milestones=[24, 32],
24 | gamma=0.1)
25 | ]
26 |
27 | # Default setting for scaling LR automatically
28 | # - `enable` means enable scaling LR automatically
29 | # or not by default.
30 | # - `base_batch_size` = (4 GPUs) x (8 samples per GPU).
31 | auto_scale_lr = dict(enable=False, base_batch_size=32)
32 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/seg-cosine-100e.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used on S3DIS dataset in segmentation task
3 | optim_wrapper = dict(
4 | type='OptimWrapper',
5 | optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.001),
6 | clip_grad=None)
7 |
8 | param_scheduler = [
9 | dict(
10 | type='CosineAnnealingLR',
11 | T_max=100,
12 | eta_min=1e-5,
13 | by_epoch=True,
14 | begin=0,
15 | end=100)
16 | ]
17 |
18 | # runtime settings
19 | train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
20 | val_cfg = dict()
21 | test_cfg = dict()
22 |
23 | # Default setting for scaling LR automatically
24 | # - `enable` means enable scaling LR automatically
25 | # or not by default.
26 | # - `base_batch_size` = (4 GPUs) x (32 samples per GPU).
27 | auto_scale_lr = dict(enable=False, base_batch_size=128)
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/seg-cosine-150e.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used on S3DIS dataset in segmentation task
3 | optim_wrapper = dict(
4 | type='OptimWrapper',
5 | optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.0001),
6 | clip_grad=None)
7 |
8 | param_scheduler = [
9 | dict(
10 | type='CosineAnnealingLR',
11 | T_max=150,
12 | eta_min=0.002,
13 | by_epoch=True,
14 | begin=0,
15 | end=150)
16 | ]
17 |
18 | # runtime settings
19 | train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=1)
20 | val_cfg = dict()
21 | test_cfg = dict()
22 |
23 | # Default setting for scaling LR automatically
24 | # - `enable` means enable scaling LR automatically
25 | # or not by default.
26 | # - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
27 | auto_scale_lr = dict(enable=False, base_batch_size=64)
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/seg-cosine-200e.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used on S3DIS dataset in segmentation task
3 | optim_wrapper = dict(
4 | type='OptimWrapper',
5 | optimizer=dict(type='Adam', lr=0.001, weight_decay=0.01),
6 | clip_grad=None)
7 |
8 | param_scheduler = [
9 | dict(
10 | type='CosineAnnealingLR',
11 | T_max=200,
12 | eta_min=1e-5,
13 | by_epoch=True,
14 | begin=0,
15 | end=200)
16 | ]
17 |
18 | # runtime settings
19 | train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1)
20 | val_cfg = dict()
21 | test_cfg = dict()
22 |
23 | # Default setting for scaling LR automatically
24 | # - `enable` means enable scaling LR automatically
25 | # or not by default.
26 | # - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
27 | auto_scale_lr = dict(enable=False, base_batch_size=32)
28 |
--------------------------------------------------------------------------------
/configs/_base_/schedules/seg-cosine-50e.py:
--------------------------------------------------------------------------------
1 | # optimizer
2 | # This schedule is mainly used on S3DIS dataset in segmentation task
3 | optim_wrapper = dict(
4 | type='OptimWrapper',
5 | optimizer=dict(type='Adam', lr=0.001, weight_decay=0.001),
6 | clip_grad=None)
7 |
8 | param_scheduler = [
9 | dict(
10 | type='CosineAnnealingLR',
11 | T_max=50,
12 | eta_min=1e-5,
13 | by_epoch=True,
14 | begin=0,
15 | end=50)
16 | ]
17 |
18 | # runtime settings
19 | train_cfg = dict(by_epoch=True, max_epochs=50, val_interval=1)
20 | val_cfg = dict()
21 | test_cfg = dict()
22 |
23 | # Default setting for scaling LR automatically
24 | # - `enable` means whether to enable automatic LR scaling
25 | # (disabled by default).
26 | # - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
27 | auto_scale_lr = dict(enable=False, base_batch_size=32)
28 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle')))
4 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)),
16 | test_cfg=dict(pts=dict(nms_type='circle')))
17 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_pillar02_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_pillar02_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)))
16 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle')))
4 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)),
16 | test_cfg=dict(pts=dict(nms_type='circle')))
17 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel0075_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)))
16 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(test_cfg=dict(pts=dict(nms_type='circle')))
4 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn-circlenms_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)),
16 | test_cfg=dict(pts=dict(nms_type='circle')))
17 |
--------------------------------------------------------------------------------
/configs/centerpoint/centerpoint_voxel01_second_secfpn_head-dcn_8xb4-cyclic-20e_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./centerpoint_voxel01_second_secfpn_8xb4-cyclic-20e_nus-3d.py']
2 |
3 | model = dict(
4 | pts_bbox_head=dict(
5 | separate_head=dict(
6 | type='DCNSeparateHead',
7 | dcn_config=dict(
8 | type='DCN',
9 | in_channels=64,
10 | out_channels=64,
11 | kernel_size=3,
12 | padding=1,
13 | groups=4),
14 | init_bias=-2.19,
15 | final_kernel=3)))
16 |
--------------------------------------------------------------------------------
/configs/cylinder3d/cylinder3d_4xb4-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/datasets/semantickitti.py', '../_base_/models/cylinder3d.py',
3 | '../_base_/default_runtime.py'
4 | ]
5 |
6 | # optimizer
7 | lr = 0.001
8 | optim_wrapper = dict(
9 | type='OptimWrapper',
10 | optimizer=dict(type='AdamW', lr=lr, weight_decay=0.01))
11 |
12 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1)
13 | val_cfg = dict(type='ValLoop')
14 | test_cfg = dict(type='TestLoop')
15 |
16 | # learning rate
17 | param_scheduler = [
18 | dict(
19 | type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
20 | end=1000),
21 | dict(
22 | type='MultiStepLR',
23 | begin=0,
24 | end=36,
25 | by_epoch=True,
26 | milestones=[30],
27 | gamma=0.1)
28 | ]
29 |
30 | train_dataloader = dict(batch_size=4, )
31 |
32 | # Default setting for scaling LR automatically
33 | # - `enable` means enable scaling LR automatically
34 | # or not by default.
35 | # - `base_batch_size` = (8 GPUs) x (4 samples per GPU).
36 | # auto_scale_lr = dict(enable=False, base_batch_size=32)
37 |
38 | default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=5))
39 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area1.py:
--------------------------------------------------------------------------------
1 | _base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
2 |
3 | # data settings
4 | train_area = [2, 3, 4, 5, 6]
5 | test_area = 1
6 | train_dataloader = dict(
7 | batch_size=32,
8 | dataset=dict(
9 | ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
10 | scene_idxs=[
11 | f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
12 | ]))
13 | test_dataloader = dict(
14 | dataset=dict(
15 | ann_files=f's3dis_infos_Area_{test_area}.pkl',
16 | scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
17 | val_dataloader = test_dataloader
18 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area2.py:
--------------------------------------------------------------------------------
1 | _base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
2 |
3 | # data settings
4 | train_area = [1, 3, 4, 5, 6]
5 | test_area = 2
6 | train_dataloader = dict(
7 | batch_size=32,
8 | dataset=dict(
9 | ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
10 | scene_idxs=[
11 | f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
12 | ]))
13 | test_dataloader = dict(
14 | dataset=dict(
15 | ann_files=f's3dis_infos_Area_{test_area}.pkl',
16 | scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
17 | val_dataloader = test_dataloader
18 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area3.py:
--------------------------------------------------------------------------------
1 | _base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
2 |
3 | # data settings
4 | train_area = [1, 2, 4, 5, 6]
5 | test_area = 3
6 | train_dataloader = dict(
7 | batch_size=32,
8 | dataset=dict(
9 | ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
10 | scene_idxs=[
11 | f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
12 | ]))
13 | test_dataloader = dict(
14 | dataset=dict(
15 | ann_files=f's3dis_infos_Area_{test_area}.pkl',
16 | scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
17 | val_dataloader = test_dataloader
18 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area4.py:
--------------------------------------------------------------------------------
1 | _base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
2 |
3 | # data settings
4 | train_area = [1, 2, 3, 5, 6]
5 | test_area = 4
6 | train_dataloader = dict(
7 | batch_size=32,
8 | dataset=dict(
9 | ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
10 | scene_idxs=[
11 | f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
12 | ]))
13 | test_dataloader = dict(
14 | dataset=dict(
15 | ann_files=f's3dis_infos_Area_{test_area}.pkl',
16 | scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
17 | val_dataloader = test_dataloader
18 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/datasets/s3dis-seg.py', '../_base_/models/dgcnn.py',
3 | '../_base_/schedules/seg-cosine-100e.py', '../_base_/default_runtime.py'
4 | ]
5 |
6 | # model settings
7 | model = dict(
8 | backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
9 | decode_head=dict(
10 | num_classes=13, ignore_index=13,
11 | loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
12 | test_cfg=dict(
13 | num_points=4096,
14 | block_size=1.0,
15 | sample_rate=0.5,
16 | use_normalized_coord=True,
17 | batch_size=24))
18 |
19 | default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
20 | train_dataloader = dict(batch_size=32)
21 | train_cfg = dict(val_interval=2)
22 |
--------------------------------------------------------------------------------
/configs/dgcnn/dgcnn_4xb32-cosine-100e_s3dis-seg_test-area6.py:
--------------------------------------------------------------------------------
1 | _base_ = './dgcnn_4xb32-cosine-100e_s3dis-seg_test-area5.py'
2 |
3 | # data settings
4 | train_area = [1, 2, 3, 4, 5]
5 | test_area = 6
6 | train_dataloader = dict(
7 | batch_size=32,
8 | dataset=dict(
9 | ann_files=[f's3dis_infos_Area_{i}.pkl' for i in train_area],
10 | scene_idxs=[
11 | f'seg_info/Area_{i}_resampled_scene_idxs.npy' for i in train_area
12 | ]))
13 | test_dataloader = dict(
14 | dataset=dict(
15 | ann_files=f's3dis_infos_Area_{test_area}.pkl',
16 | scene_idxs=f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'))
17 | val_dataloader = test_dataloader
18 |
--------------------------------------------------------------------------------
/configs/dynamic_voxelization/pointpillars_dv_secfpn_8xb6-160e_kitti-3d-car.py:
--------------------------------------------------------------------------------
1 | _base_ = '../pointpillars/pointpillars_hv_secfpn_8xb6-160e_kitti-3d-car.py'
2 |
3 | voxel_size = [0.16, 0.16, 4]
4 | point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
5 |
6 | model = dict(
7 | type='DynamicVoxelNet',
8 | data_preprocessor=dict(
9 | voxel_type='dynamic',
10 | voxel_layer=dict(
11 | max_num_points=-1,
12 | point_cloud_range=point_cloud_range,
13 | voxel_size=voxel_size,
14 | max_voxels=(-1, -1))),
15 | voxel_encoder=dict(
16 | type='DynamicPillarFeatureNet',
17 | in_channels=4,
18 | feat_channels=[64],
19 | with_distance=False,
20 | voxel_size=voxel_size,
21 | point_cloud_range=point_cloud_range))
22 |
--------------------------------------------------------------------------------
/configs/dynamic_voxelization/second_dv_secfpn_8xb2-cosine-80e_kitti-3d-3class.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/second_hv_secfpn_kitti.py',
3 | '../_base_/datasets/kitti-3d-3class.py', '../_base_/schedules/cosine.py',
4 | '../_base_/default_runtime.py'
5 | ]
6 |
7 | point_cloud_range = [0, -40, -3, 70.4, 40, 1]
8 | voxel_size = [0.05, 0.05, 0.1]
9 |
10 | model = dict(
11 | type='DynamicVoxelNet',
12 | data_preprocessor=dict(
13 | voxel_type='dynamic',
14 | voxel_layer=dict(
15 | _delete_=True,
16 | max_num_points=-1,
17 | point_cloud_range=point_cloud_range,
18 | voxel_size=voxel_size,
19 | max_voxels=(-1, -1))),
20 | voxel_encoder=dict(
21 | _delete_=True,
22 | type='DynamicSimpleVFE',
23 | voxel_size=voxel_size,
24 | point_cloud_range=point_cloud_range))
25 |
--------------------------------------------------------------------------------
/configs/dynamic_voxelization/second_dv_secfpn_8xb6-80e_kitti-3d-car.py:
--------------------------------------------------------------------------------
1 | _base_ = '../second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py'
2 |
3 | point_cloud_range = [0, -40, -3, 70.4, 40, 1]
4 | voxel_size = [0.05, 0.05, 0.1]
5 |
6 | model = dict(
7 | type='DynamicVoxelNet',
8 | data_preprocessor=dict(
9 | voxel_type='dynamic',
10 | voxel_layer=dict(
11 | _delete_=True,
12 | max_num_points=-1,
13 | point_cloud_range=point_cloud_range,
14 | voxel_size=voxel_size,
15 | max_voxels=(-1, -1))),
16 | voxel_encoder=dict(
17 | _delete_=True,
18 | type='DynamicSimpleVFE',
19 | voxel_size=voxel_size,
20 | point_cloud_range=point_cloud_range))
21 |
--------------------------------------------------------------------------------
/configs/fcaf3d/fcaf3d_2xb8_s3dis-3d-5class.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
3 | '../_base_/datasets/s3dis-3d.py'
4 | ]
5 |
6 | model = dict(bbox_head=dict(num_classes=5))
7 |
8 | optim_wrapper = dict(
9 | type='OptimWrapper',
10 | optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
11 | clip_grad=dict(max_norm=10, norm_type=2))
12 |
13 | # learning rate
14 | param_scheduler = dict(
15 | type='MultiStepLR',
16 | begin=0,
17 | end=12,
18 | by_epoch=True,
19 | milestones=[8, 11],
20 | gamma=0.1)
21 |
22 | custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
23 |
24 | # training schedule for 1x
25 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
26 | val_cfg = dict(type='ValLoop')
27 | test_cfg = dict(type='TestLoop')
28 |
--------------------------------------------------------------------------------
/configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d_finetune.py:
--------------------------------------------------------------------------------
1 | _base_ = './fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py'
2 | # model settings
3 | model = dict(
4 | train_cfg=dict(
5 | code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05]))
6 | # optimizer
7 | optim_wrapper = dict(optimizer=dict(lr=0.001))
8 | load_from = 'work_dirs/fcos3d_nus/latest.pth'
9 |
--------------------------------------------------------------------------------
/configs/free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py'
2 |
3 | model = dict(
4 | pts_backbone=dict(
5 | _delete_=True,
6 | type='NoStemRegNet',
7 | arch='regnetx_1.6gf',
8 | init_cfg=dict(
9 | type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf'),
10 | out_indices=(1, 2, 3),
11 | frozen_stages=-1,
12 | strides=(1, 2, 2, 2),
13 | base_channels=64,
14 | stem_channels=64,
15 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
16 | norm_eval=False,
17 | style='pytorch'),
18 | pts_neck=dict(in_channels=[168, 408, 912]))
19 |
--------------------------------------------------------------------------------
/configs/free_anchor/pointpillars_hv_regnet-3.2gf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py'
2 |
3 | model = dict(
4 | pts_backbone=dict(
5 | _delete_=True,
6 | type='NoStemRegNet',
7 | arch='regnetx_3.2gf',
8 | init_cfg=dict(
9 | type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'),
10 | out_indices=(1, 2, 3),
11 | frozen_stages=-1,
12 | strides=(1, 2, 2, 2),
13 | base_channels=64,
14 | stem_channels=64,
15 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
16 | norm_eval=False,
17 | style='pytorch'),
18 | pts_neck=dict(in_channels=[192, 432, 1008]))
19 |
--------------------------------------------------------------------------------
/configs/free_anchor/pointpillars_hv_regnet-400mf_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_head-free-anchor_sbn-all_8xb4-2x_nus-3d.py'
2 |
3 | model = dict(
4 | pts_backbone=dict(
5 | _delete_=True,
6 | type='NoStemRegNet',
7 | arch='regnetx_400mf',
8 | init_cfg=dict(
9 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
10 | out_indices=(1, 2, 3),
11 | frozen_stages=-1,
12 | strides=(1, 2, 2, 2),
13 | base_channels=64,
14 | stem_channels=64,
15 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
16 | norm_eval=False,
17 | style='pytorch'),
18 | pts_neck=dict(in_channels=[64, 160, 384]))
19 |
--------------------------------------------------------------------------------
/configs/h3dnet/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: H3DNet
3 | Metadata:
4 | Training Data: ScanNet
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 8x GeForce GTX 1080 Ti
8 | Architecture:
9 | Paper:
10 | URL: https://arxiv.org/abs/2006.05682
11 | Title: 'H3DNet: 3D Object Detection Using Hybrid Geometric Primitives'
12 | README: configs/h3dnet/README.md
13 | Code:
14 | URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/h3dnet.py#L10
15 | Version: v0.6.0
16 |
17 | Models:
18 | - Name: h3dnet_3x8_scannet-3d-18class
19 | In Collection: H3DNet
20 | Config: configs/h3dnet/h3dnet_8xb3_scannet-seg.py
21 | Metadata:
22 | Training Memory (GB): 7.9
23 | Results:
24 | - Task: 3D Object Detection
25 | Dataset: ScanNet
26 | Metrics:
27 | AP@0.25: 66.07
28 | AP@0.5: 47.68
29 | Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/h3dnet/h3dnet_3x8_scannet-3d-18class/h3dnet_3x8_scannet-3d-18class_20210824_003149-414bd304.pth
30 |
--------------------------------------------------------------------------------
/configs/imvoxelnet/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: ImVoxelNet
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 8x Tesla P40
8 | Architecture:
9 | - Anchor3DHead
10 | Paper:
11 | URL: https://arxiv.org/abs/2106.01178
12 | Title: 'ImVoxelNet: Image to Voxels Projection for Monocular and Multi-View General-Purpose 3D Object Detection'
13 | README: configs/imvoxelnet/README.md
14 | Code:
15 | URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/imvoxelnet.py#L11
16 | Version: v0.15.0
17 |
18 | Models:
19 | - Name: imvoxelnet_kitti-3d-car
20 | In Collection: ImVoxelNet
21 | Config: configs/imvoxelnet/imvoxelnet_8xb4_kitti-3d-car.py
22 | Metadata:
23 | Training Memory (GB): 15.0
24 | Results:
25 | - Task: 3D Object Detection
26 | Dataset: KITTI
27 | Metrics:
28 | mAP: 17.26
29 | Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/imvoxelnet/imvoxelnet_4x8_kitti-3d-car/imvoxelnet_4x8_kitti-3d-car_20210830_003014-3d0ffdf4.pth
30 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet18_w16_torchsparse_8xb2-amp-15e_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./minkunet18_w32_torchsparse_8xb2-amp-15e_semantickitti.py']
2 |
3 | model = dict(
4 | backbone=dict(
5 | base_channels=16,
6 | encoder_channels=[16, 32, 64, 128],
7 | decoder_channels=[128, 64, 48, 48]),
8 | decode_head=dict(channels=48))
9 |
10 | # NOTE: Due to TorchSparse backend, the model performance is relatively
11 | # dependent on random seeds, and if random seeds are not specified the
12 | # model performance will be different (± 1.5 mIoU).
13 | randomness = dict(seed=1588147245)
14 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet18_w20_torchsparse_8xb2-amp-15e_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./minkunet18_w32_torchsparse_8xb2-amp-15e_semantickitti.py']
2 |
3 | model = dict(
4 | backbone=dict(
5 | base_channels=20,
6 | encoder_channels=[20, 40, 81, 163],
7 | decoder_channels=[163, 81, 61, 61]),
8 | decode_head=dict(channels=61))
9 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet34_w32_minkowski_8xb2-laser-polar-mix-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | './minkunet34_w32_torchsparse_8xb2-laser-polar-mix-3x_semantickitti.py'
3 | ]
4 |
5 | model = dict(
6 | data_preprocessor=dict(batch_first=True),
7 | backbone=dict(sparseconv_backend='minkowski'))
8 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet34_w32_spconv_8xb2-amp-laser-polar-mix-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | './minkunet34_w32_torchsparse_8xb2-laser-polar-mix-3x_semantickitti.py'
3 | ]
4 |
5 | model = dict(
6 | data_preprocessor=dict(batch_first=True),
7 | backbone=dict(sparseconv_backend='spconv'))
8 |
9 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
10 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet34_w32_spconv_8xb2-laser-polar-mix-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | './minkunet34_w32_torchsparse_8xb2-laser-polar-mix-3x_semantickitti.py'
3 | ]
4 |
5 | model = dict(
6 | data_preprocessor=dict(batch_first=True),
7 | backbone=dict(sparseconv_backend='spconv'))
8 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet34_w32_torchsparse_8xb2-amp-laser-polar-mix-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | './minkunet34_w32_torchsparse_8xb2-laser-polar-mix-3x_semantickitti.py'
3 | ]
4 |
5 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
6 |
--------------------------------------------------------------------------------
/configs/minkunet/minkunet34v2_w32_torchsparse_8xb2-amp-laser-polar-mix-3x_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | './minkunet34_w32_torchsparse_8xb2-amp-laser-polar-mix-3x_semantickitti.py'
3 | ]
4 |
5 | model = dict(
6 | backbone=dict(type='MinkUNetBackboneV2'),
7 | decode_head=dict(channels=256 + 128 + 96))
8 |
9 | randomness = dict(seed=None, deterministic=False, diff_rank_seed=True)
10 | env_cfg = dict(cudnn_benchmark=True)
11 |
--------------------------------------------------------------------------------
/configs/monoflex/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: MonoFlex
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - Adam
 7 | Training Resources: 2x V100 GPUs
8 | Architecture:
9 | - MonoFlexHead
10 | - DLA
11 | Paper:
12 | URL: https://arxiv.org/abs/2104.02323
13 | Title: 'Objects are Different: Flexible Monocular 3D Object Detection'
14 | README: configs/monoflex/README.md
15 | Code:
16 | URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/monoflex.py#L7
17 | Version: v1.0.0
18 |
19 | Models:
20 | - Name: monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d
21 | In Collection: MonoFlex
22 | Config: configs/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d.py
23 | Metadata:
24 | Training Memory (GB): 9.64
25 | Results:
26 | - Task: 3D Object Detection
27 | Dataset: KITTI
28 | Metrics:
29 | mAP: 21.86
30 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/monoflex/monoflex_dla34_pytorch_dlaneck_gn-all_2x4_6x_kitti-mono3d_20211228_027553-d46d9bb0.pth
31 |
--------------------------------------------------------------------------------
/configs/mvxnet/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: MVX-Net
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 8x V100 GPUs
8 | Architecture:
9 | - Feature Pyramid Network
10 | - Dynamic Voxelization
11 | Paper:
12 | URL: https://arxiv.org/abs/1904.01649
13 | Title: 'MVX-Net: Multimodal VoxelNet for 3D Object Detection'
14 | README: configs/mvxnet/README.md
15 | Code:
16 | URL: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/detectors/mvx_two_stage.py#L20
17 | Version: v0.5.0
18 |
19 | Models:
20 | - Name: dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class
21 | Alias: mvxnet_kitti-3class
22 | In Collection: MVX-Net
23 | Config: configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class.py
24 | Metadata:
25 | Training Memory (GB): 6.7
26 | Results:
27 | - Task: 3D Object Detection
28 | Dataset: KITTI
29 | Metrics:
30 | mAP: 63.5
31 | Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class/mvxnet_fpn_dv_second_secfpn_8xb2-80e_kitti-3d-3class-8963258a.pth
32 |
--------------------------------------------------------------------------------
/configs/nuimages/cascade-mask-rcnn-r50-fpn_coco-20e_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './cascade-mask-rcnn_r50_fpn_1x_nuim.py'
2 |
3 | # learning policy
4 | lr_config = dict(step=[16, 19])
5 | runner = dict(max_epochs=20)
6 |
7 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa
8 |
--------------------------------------------------------------------------------
/configs/nuimages/cascade-mask-rcnn_r101_fpn_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './cascade-mask-rcnn_r50_fpn_1x_nuim.py'
2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
3 |
--------------------------------------------------------------------------------
/configs/nuimages/cascade-mask-rcnn_r50_fpn_coco-20e-1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './cascade-mask-rcnn_r50_fpn_1x_nuim.py'
2 |
3 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth' # noqa
4 |
--------------------------------------------------------------------------------
/configs/nuimages/cascade-mask-rcnn_x101_32x4d_fpn_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './cascade-mask-rcnn_r50_fpn_1x_nuim.py'
2 | model = dict(
3 | pretrained='open-mmlab://resnext101_32x4d',
4 | backbone=dict(
5 | type='ResNeXt',
6 | depth=101,
7 | groups=32,
8 | base_width=4,
9 | num_stages=4,
10 | out_indices=(0, 1, 2, 3),
11 | frozen_stages=1,
12 | norm_cfg=dict(type='BN', requires_grad=True),
13 | style='pytorch'))
14 |
--------------------------------------------------------------------------------
/configs/nuimages/htc_r50_fpn_coco-20e-1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './htc_r50_fpn_1x_nuim.py'
2 |
3 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth' # noqa
4 |
--------------------------------------------------------------------------------
/configs/nuimages/htc_r50_fpn_coco-20e_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './htc_r50_fpn_coco-20e-1x_nuim.py'
2 | # learning policy
3 | lr_config = dict(step=[16, 19])
4 | runner = dict(max_epochs=20)
5 |
--------------------------------------------------------------------------------
/configs/nuimages/htc_x101_64x4d_fpn_dconv_c3-c5_coco-20e-1xb16_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './htc_r50_fpn_1x_nuim.py'
2 | model = dict(
3 | pretrained='open-mmlab://resnext101_64x4d',
4 | backbone=dict(
5 | type='ResNeXt',
6 | depth=101,
7 | groups=64,
8 | base_width=4,
9 | num_stages=4,
10 | out_indices=(0, 1, 2, 3),
11 | frozen_stages=1,
12 | norm_cfg=dict(type='BN', requires_grad=True),
13 | norm_eval=True,
14 | style='pytorch',
15 | dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
16 | stage_with_dcn=(False, True, True, True)))
17 |
18 | data = dict(samples_per_gpu=1, workers_per_gpu=1)
19 | # learning policy
20 | lr_config = dict(step=[16, 19])
21 | runner = dict(max_epochs=20)
22 |
23 | load_from = 'http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth' # noqa
24 |
--------------------------------------------------------------------------------
/configs/nuimages/mask-rcnn_r101_fpn_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './mask-rcnn_r50_fpn_1x_nuim.py'
2 | model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
3 |
--------------------------------------------------------------------------------
/configs/nuimages/mask-rcnn_r50_fpn_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/mask-rcnn_r50_fpn.py',
3 | '../_base_/datasets/nuim-instance.py',
4 | '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
5 | ]
6 | model = dict(
7 | roi_head=dict(
8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10)))
9 |
--------------------------------------------------------------------------------
/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/mask-rcnn_r50_fpn.py',
3 | '../_base_/datasets/nuim-instance.py',
4 | '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
5 | ]
6 | model = dict(
7 | roi_head=dict(
8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10)))
9 | load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth' # noqa
10 |
--------------------------------------------------------------------------------
/configs/nuimages/mask-rcnn_r50_fpn_coco-2x_1x_nus-2d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/mask-rcnn_r50_fpn.py',
3 | '../_base_/datasets/nuim-instance.py',
4 | '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
5 | ]
6 | model = dict(
7 | roi_head=dict(
8 | bbox_head=dict(num_classes=10), mask_head=dict(num_classes=10)))
9 |
10 | backend_args = None
11 |
12 | test_pipeline = [
13 | dict(type='LoadImageFromFile', backend_args=backend_args),
14 | dict(
15 | type='MultiScaleFlipAug',
16 | img_scale=(1600, 900),
17 | flip=False,
18 | transforms=[
19 | dict(type='Resize', keep_ratio=True),
20 | dict(type='RandomFlip'),
21 | ]),
22 | dict(
23 | type='PackDetInputs',
24 | meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
25 | 'scale_factor')),
26 | ]
27 | data_root = 'data/nuimages/'
28 | # data = dict(
29 | # val=dict(
30 | # ann_file=data_root + 'annotations/nuimages_v1.0-mini.json'),
31 | # test=dict(
32 | # ann_file=data_root + 'annotations/nuimages_v1.0-mini.json'))
33 |
--------------------------------------------------------------------------------
/configs/nuimages/mask-rcnn_x101_32x4d_fpn_1x_nuim.py:
--------------------------------------------------------------------------------
1 | _base_ = './mask-rcnn_r50_fpn_1x_nuim.py'
2 | model = dict(
3 | pretrained='open-mmlab://resnext101_32x4d',
4 | backbone=dict(
5 | type='ResNeXt',
6 | depth=101,
7 | groups=32,
8 | base_width=4,
9 | num_stages=4,
10 | out_indices=(0, 1, 2, 3),
11 | frozen_stages=1,
12 | norm_cfg=dict(type='BN', requires_grad=True),
13 | style='pytorch'))
14 |
--------------------------------------------------------------------------------
/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d_finetune.py:
--------------------------------------------------------------------------------
1 | _base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py'
2 | # model settings
3 | model = dict(
4 | train_cfg=dict(code_weight=[
5 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05, 0.2, 0.2, 0.2, 0.2
6 | ]))
7 | # optimizer
8 | optim_wrapper = dict(optimizer=dict(lr=0.002))
9 | load_from = 'work_dirs/pgd_nus_benchmark_1x/latest.pth'
10 |
--------------------------------------------------------------------------------
/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-1x_nus-mono3d.py'
2 |
3 | # learning policy
4 | param_scheduler = [
5 | dict(
6 | type='LinearLR',
7 | start_factor=1.0 / 3,
8 | by_epoch=False,
9 | begin=0,
10 | end=500),
11 | dict(
12 | type='MultiStepLR',
13 | begin=0,
14 | end=24,
15 | by_epoch=True,
16 | milestones=[16, 22],
17 | gamma=0.1)
18 | ]
19 |
20 | train_cfg = dict(max_epochs=24)
21 |
--------------------------------------------------------------------------------
/configs/pgd/pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d_finetune.py:
--------------------------------------------------------------------------------
1 | _base_ = './pgd_r101-caffe_fpn_head-gn_16xb2-2x_nus-mono3d.py'
2 | # model settings
3 | model = dict(
4 | train_cfg=dict(code_weight=[
5 | 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05, 0.2, 0.2, 0.2, 0.2
6 | ]))
7 | # optimizer
8 | optim_wrapper = dict(optimizer=dict(lr=0.002))
9 | load_from = 'work_dirs/pgd_nus_benchmark_2x/latest.pth'
10 |
--------------------------------------------------------------------------------
/configs/point_rcnn/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: PointRCNN
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 8x Titan XP GPUs
8 | Architecture:
9 | - PointNet++
10 | Paper:
11 | URL: https://arxiv.org/abs/1812.04244
12 | Title: 'PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud'
13 | README: configs/point_rcnn/README.md
14 | Code:
15 | URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/point_rcnn.py#L8
16 | Version: v1.0.0
17 |
18 | Models:
19 | - Name: point-rcnn_8xb2_kitti-3d-3class
20 | In Collection: PointRCNN
21 | Config: configs/point_rcnn/point-rcnn_8xb2_kitti-3d-3class.py
22 | Metadata:
23 | Training Memory (GB): 4.6
24 | Results:
25 | - Task: 3D Object Detection
26 | Dataset: KITTI
27 | Metrics:
28 | mAP: 70.83
29 | Weights: https://download.openmmlab.com/mmdetection3d/v1.0.0_models/point_rcnn/point_rcnn_2x8_kitti-3d-3classes_20211208_151344.pth
30 |
--------------------------------------------------------------------------------
/configs/pointnet2/pointnet2_msg_2xb16-cosine-80e_s3dis-seg.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_msg.py',
3 | '../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py'
4 | ]
5 |
6 | # model settings
7 | model = dict(
8 | backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
9 | decode_head=dict(
10 | num_classes=13, ignore_index=13,
11 | loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
12 | test_cfg=dict(
13 | num_points=4096,
14 | block_size=1.0,
15 | sample_rate=0.5,
16 | use_normalized_coord=True,
17 | batch_size=24))
18 |
19 | # data settings
20 | train_dataloader = dict(batch_size=16)
21 |
22 | # runtime settings
23 | default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
24 |
25 | # PointNet2-MSG needs longer training time than PointNet2-SSG
26 | train_cfg = dict(by_epoch=True, max_epochs=80, val_interval=2)
27 |
--------------------------------------------------------------------------------
/configs/pointnet2/pointnet2_ssg_2xb16-cosine-50e_s3dis-seg.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/datasets/s3dis-seg.py', '../_base_/models/pointnet2_ssg.py',
3 | '../_base_/schedules/seg-cosine-50e.py', '../_base_/default_runtime.py'
4 | ]
5 |
6 | # model settings
7 | model = dict(
8 | backbone=dict(in_channels=9), # [xyz, rgb, normalized_xyz]
9 | decode_head=dict(
10 | num_classes=13, ignore_index=13,
11 | loss_decode=dict(class_weight=None)), # S3DIS doesn't use class_weight
12 | test_cfg=dict(
13 | num_points=4096,
14 | block_size=1.0,
15 | sample_rate=0.5,
16 | use_normalized_coord=True,
17 | batch_size=24))
18 |
19 | # data settings
20 | train_dataloader = dict(batch_size=16)
21 |
22 | # runtime settings
23 | default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))
24 | train_cfg = dict(val_interval=2)
25 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d-range100.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_range100_lyft.py',
3 | '../_base_/datasets/lyft-3d-range100.py',
4 | '../_base_/schedules/schedule-2x.py', '../_base_/default_runtime.py'
5 | ]
6 | # Default setting for scaling LR automatically
7 | # - `enable` means enable scaling LR automatically
8 | # or not by default.
9 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
10 | auto_scale_lr = dict(enable=False, base_batch_size=16)
11 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-2x_lyft-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_lyft.py',
3 | '../_base_/datasets/lyft-3d.py', '../_base_/schedules/schedule-2x.py',
4 | '../_base_/default_runtime.py'
5 | ]
6 | # Default setting for scaling LR automatically
7 | # - `enable` means enable scaling LR automatically
8 | # or not by default.
9 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
10 | auto_scale_lr = dict(enable=False, base_batch_size=16)
11 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb2-amp-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py'
2 | train_dataloader = dict(batch_size=2, num_workers=2)
3 | # schedule settings
4 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=4096.)
5 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_fpn_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_nus.py',
3 | '../_base_/datasets/nus-3d.py', '../_base_/schedules/schedule-2x.py',
4 | '../_base_/default_runtime.py'
5 | ]
6 |
7 | # For nuScenes dataset, we usually evaluate the model at the end of training.
8 | # Since the models are trained by 24 epochs by default, we set evaluation
9 | # interval to be 24. Please change the interval accordingly if you do not
10 | # use a default schedule.
11 | train_cfg = dict(val_interval=24)
12 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymo-3d-3class.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_secfpn_waymo.py',
3 | '../_base_/datasets/waymoD5-3d-3class.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 |
8 | # data settings
9 | train_dataloader = dict(dataset=dict(dataset=dict(load_interval=1)))
10 | # Default setting for scaling LR automatically
11 | # - `enable` means enable scaling LR automatically
12 | # or not by default.
13 | # - `base_batch_size` = (16 GPUs) x (2 samples per GPU).
14 | auto_scale_lr = dict(enable=False, base_batch_size=32)
15 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_16xb2-2x_waymoD5-3d-3class.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_secfpn_waymo.py',
3 | '../_base_/datasets/waymoD5-3d-3class.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 |
--------------------------------------------------------------------------------
/configs/pointpillars/pointpillars_hv_secfpn_sbn-all_8xb2-amp-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './pointpillars_hv_secfpn_sbn-all_8xb4-2x_nus-3d.py'
2 | train_dataloader = dict(batch_size=2, num_workers=2)
3 | # schedule settings
4 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=4096.)
5 |
--------------------------------------------------------------------------------
/configs/pv_rcnn/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: PV-RCNN
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - AdamW
7 | Training Resources: 8x A100 GPUs
8 | Architecture:
9 | - Feature Pyramid Network
10 | Paper:
11 | URL: https://arxiv.org/abs/1912.13192
12 | Title: 'PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection'
13 | README: configs/pv_rcnn/README.md
14 | Code:
15 | URL: https://github.com/open-mmlab/mmdetection3d/blob/dev-1.x/mmdet3d/models/detectors/pv_rcnn.py#L12
16 | Version: v1.1.0rc2
17 |
18 | Models:
19 | - Name: pv_rcnn_8xb2-80e_kitti-3d-3class
20 | In Collection: PV-RCNN
21 | Config: configs/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class.py
22 | Metadata:
23 | Training Memory (GB): 5.4
24 | Results:
25 | - Task: 3D Object Detection
26 | Dataset: KITTI
27 | Metrics:
28 | mAP: 72.28
29 | Weights: https://download.openmmlab.com/mmdetection3d/v1.1.0_models/pv_rcnn/pv_rcnn_8xb2-80e_kitti-3d-3class/pv_rcnn_8xb2-80e_kitti-3d-3class_20221117_234428-b384d22f.pth
30 |
--------------------------------------------------------------------------------
/configs/regnet/pointpillars_hv_regnet-1.6gf_fpn_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_nus.py',
3 | '../_base_/datasets/nus-3d.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 | # model settings
8 | model = dict(
9 | type='MVXFasterRCNN',
10 | pts_backbone=dict(
11 | _delete_=True,
12 | type='NoStemRegNet',
13 | arch='regnetx_1.6gf',
14 | init_cfg=dict(
15 | type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf'),
16 | out_indices=(1, 2, 3),
17 | frozen_stages=-1,
18 | strides=(1, 2, 2, 2),
19 | base_channels=64,
20 | stem_channels=64,
21 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
22 | norm_eval=False,
23 | style='pytorch'),
24 | pts_neck=dict(in_channels=[168, 408, 912]))
25 |
--------------------------------------------------------------------------------
/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb2-2x_lyft-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_lyft.py',
3 | '../_base_/datasets/lyft-3d.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 | # model settings
8 | model = dict(
9 | type='MVXFasterRCNN',
10 | pts_backbone=dict(
11 | _delete_=True,
12 | type='NoStemRegNet',
13 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
14 | init_cfg=dict(
15 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
16 | out_indices=(1, 2, 3),
17 | frozen_stages=-1,
18 | strides=(1, 2, 2, 2),
19 | base_channels=64,
20 | stem_channels=64,
21 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
22 | norm_eval=False,
23 | style='pytorch'),
24 | pts_neck=dict(in_channels=[64, 160, 384]))
25 | # Default setting for scaling LR automatically
26 | # - `enable` means enable scaling LR automatically
27 | # or not by default.
28 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
29 | auto_scale_lr = dict(enable=False, base_batch_size=16)
30 |
--------------------------------------------------------------------------------
/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_8xb4-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_nus.py',
3 | '../_base_/datasets/nus-3d.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 | # model settings
8 | model = dict(
9 | type='MVXFasterRCNN',
10 | pts_backbone=dict(
11 | _delete_=True,
12 | type='NoStemRegNet',
13 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
14 | init_cfg=dict(
15 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
16 | out_indices=(1, 2, 3),
17 | frozen_stages=-1,
18 | strides=(1, 2, 2, 2),
19 | base_channels=64,
20 | stem_channels=64,
21 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
22 | norm_eval=False,
23 | style='pytorch'),
24 | pts_neck=dict(in_channels=[64, 160, 384]))
25 |
--------------------------------------------------------------------------------
/configs/regnet/pointpillars_hv_regnet-400mf_fpn_sbn-all_range100_8xb2-2x_lyft-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/pointpillars_hv_fpn_range100_lyft.py',
3 | '../_base_/datasets/lyft-3d-range100.py',
4 | '../_base_/schedules/schedule-2x.py',
5 | '../_base_/default_runtime.py',
6 | ]
7 | # model settings
8 | model = dict(
9 | type='MVXFasterRCNN',
10 | pts_backbone=dict(
11 | _delete_=True,
12 | type='NoStemRegNet',
13 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
14 | init_cfg=dict(
15 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
16 | out_indices=(1, 2, 3),
17 | frozen_stages=-1,
18 | strides=(1, 2, 2, 2),
19 | base_channels=64,
20 | stem_channels=64,
21 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
22 | norm_eval=False,
23 | style='pytorch'),
24 | pts_neck=dict(in_channels=[64, 160, 384]))
25 | # Default setting for scaling LR automatically
26 | # - `enable` means enable scaling LR automatically
27 | # or not by default.
28 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
29 | auto_scale_lr = dict(enable=False, base_batch_size=16)
30 |
--------------------------------------------------------------------------------
/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-3class.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/second_hv_secfpn_kitti.py',
3 | '../_base_/datasets/kitti-3d-3class.py',
4 | '../_base_/schedules/cyclic-40e.py', '../_base_/default_runtime.py'
5 | ]
6 |
--------------------------------------------------------------------------------
/configs/second/second_hv_secfpn_8xb6-80e_kitti-3d-car.py:
--------------------------------------------------------------------------------
1 | _base_ = [
2 | '../_base_/models/second_hv_secfpn_kitti.py',
3 | '../_base_/datasets/kitti-3d-car.py', '../_base_/schedules/cyclic-40e.py',
4 | '../_base_/default_runtime.py'
5 | ]
6 | point_cloud_range = [0, -40, -3, 70.4, 40, 1]
7 | model = dict(
8 | bbox_head=dict(
9 | type='Anchor3DHead',
10 | num_classes=1,
11 | anchor_generator=dict(
12 | _delete_=True,
13 | type='Anchor3DRangeGenerator',
14 | ranges=[[0, -40.0, -1.78, 70.4, 40.0, -1.78]],
15 | sizes=[[3.9, 1.6, 1.56]],
16 | rotations=[0, 1.57],
17 | reshape_out=True)),
18 | # model training and testing settings
19 | train_cfg=dict(
20 | _delete_=True,
21 | assigner=dict(
22 | type='Max3DIoUAssigner',
23 | iou_calculator=dict(type='BboxOverlapsNearest3D'),
24 | pos_iou_thr=0.6,
25 | neg_iou_thr=0.45,
26 | min_pos_iou=0.45,
27 | ignore_iof_thr=-1),
28 | allowed_border=0,
29 | pos_weight=-1,
30 | debug=False))
31 |
--------------------------------------------------------------------------------
/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-3class.py:
--------------------------------------------------------------------------------
1 | _base_ = 'second_hv_secfpn_8xb6-80e_kitti-3d-3class.py'
2 |
3 | # schedule settings
4 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=4096.)
5 |
--------------------------------------------------------------------------------
/configs/second/second_hv_secfpn_8xb6-amp-80e_kitti-3d-car.py:
--------------------------------------------------------------------------------
1 | _base_ = 'second_hv_secfpn_8xb6-80e_kitti-3d-car.py'
2 |
3 | # schedule settings
4 | optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=4096.)
5 |
--------------------------------------------------------------------------------
/configs/smoke/metafile.yml:
--------------------------------------------------------------------------------
1 | Collections:
2 | - Name: SMOKE
3 | Metadata:
4 | Training Data: KITTI
5 | Training Techniques:
6 | - Adam
7 |     Training Resources: 4x V100 GPUs
8 | Architecture:
9 | - SMOKEMono3DHead
10 | - DLA
11 | Paper:
12 | URL: https://arxiv.org/abs/2002.10111
13 | Title: 'SMOKE: Single-Stage Monocular 3D Object Detection via Keypoint Estimation'
14 | README: configs/smoke/README.md
15 | Code:
16 | URL: https://github.com/open-mmlab/mmdetection3d/blob/v1.0.0.dev0/mmdet3d/models/detectors/smoke_mono3d.py#L7
17 | Version: v1.0.0
18 |
19 | Models:
20 | - Name: smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d
21 | In Collection: SMOKE
22 | Config: configs/smoke/smoke_dla34_dlaneck_gn-all_4xb8-6x_kitti-mono3d.py
23 | Metadata:
24 | Training Memory (GB): 9.6
25 | Results:
26 | - Task: 3D Object Detection
27 | Dataset: KITTI
28 | Metrics:
29 | mAP: 13.8
30 | Weights: https://download.openmmlab.com/mmdetection3d/v0.1.0_models/smoke/smoke_dla34_pytorch_dlaneck_gn-all_8x4_6x_kitti-mono3d_20210929_015553-d46d9bb0.pth
31 |
--------------------------------------------------------------------------------
/configs/spvcnn/spvcnn_w16_8xb2-amp-15e_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./spvcnn_w32_8xb2-amp-15e_semantickitti.py']
2 |
3 | model = dict(
4 | backbone=dict(
5 | base_channels=16,
6 | encoder_channels=[16, 32, 64, 128],
7 | decoder_channels=[128, 64, 48, 48]),
8 | decode_head=dict(channels=48))
9 |
10 | randomness = dict(seed=1588147245)
11 |
--------------------------------------------------------------------------------
/configs/spvcnn/spvcnn_w20_8xb2-amp-15e_semantickitti.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./spvcnn_w32_8xb2-amp-15e_semantickitti.py']
2 |
3 | model = dict(
4 | backbone=dict(
5 | base_channels=20,
6 | encoder_channels=[20, 40, 81, 163],
7 | decoder_channels=[163, 81, 61, 61]),
8 | decode_head=dict(channels=61))
9 |
--------------------------------------------------------------------------------
/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb1-2x_lyft-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './ssn_hv_secfpn_sbn-all_16xb2-2x_lyft-3d.py'
2 | # model settings
3 | model = dict(
4 | type='MVXFasterRCNN',
5 | pts_backbone=dict(
6 | _delete_=True,
7 | type='NoStemRegNet',
8 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
9 | init_cfg=dict(
10 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
11 | out_indices=(1, 2, 3),
12 | frozen_stages=-1,
13 | strides=(1, 2, 2, 2),
14 | base_channels=64,
15 | stem_channels=64,
16 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
17 | norm_eval=False,
18 | style='pytorch'),
19 | pts_neck=dict(in_channels=[64, 160, 384]))
20 | # dataset settings
21 | train_dataloader = dict(batch_size=1, num_workers=2)
22 |
--------------------------------------------------------------------------------
/configs/ssn/ssn_hv_regnet-400mf_secfpn_sbn-all_16xb2-2x_nus-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = './ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py'
2 | # model settings
3 | model = dict(
4 | type='MVXFasterRCNN',
5 | data_preprocessor=dict(type='Det3DDataPreprocessor'),
6 | pts_backbone=dict(
7 | _delete_=True,
8 | type='NoStemRegNet',
9 | arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
10 | init_cfg=dict(
11 | type='Pretrained', checkpoint='open-mmlab://regnetx_400mf'),
12 | out_indices=(1, 2, 3),
13 | frozen_stages=-1,
14 | strides=(1, 2, 2, 2),
15 | base_channels=64,
16 | stem_channels=64,
17 | norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
18 | norm_eval=False,
19 | style='pytorch'),
20 | pts_neck=dict(in_channels=[64, 160, 384]))
21 |
--------------------------------------------------------------------------------
/configs/votenet/votenet_8xb16_sunrgbd-3d.py:
--------------------------------------------------------------------------------
1 | # TODO refactor the config of sunrgbd
2 | _base_ = [
3 | '../_base_/datasets/sunrgbd-3d.py', '../_base_/models/votenet.py',
4 | '../_base_/schedules/schedule-3x.py', '../_base_/default_runtime.py'
5 | ]
6 | # model settings
7 | model = dict(
8 | bbox_head=dict(
9 | num_classes=10,
10 | bbox_coder=dict(
11 | type='PartialBinBasedBBoxCoder',
12 | num_sizes=10,
13 | num_dir_bins=12,
14 | with_rot=True,
15 | mean_sizes=[
16 | [2.114256, 1.620300, 0.927272], [0.791118, 1.279516, 0.718182],
17 | [0.923508, 1.867419, 0.845495], [0.591958, 0.552978, 0.827272],
18 | [0.699104, 0.454178, 0.75625], [0.69519, 1.346299, 0.736364],
19 | [0.528526, 1.002642, 1.172878], [0.500618, 0.632163, 0.683424],
20 | [0.404671, 1.071108, 1.688889], [0.76584, 1.398258, 0.472728]
21 | ]),
22 | ))
23 | # Default setting for scaling LR automatically
24 | # - `enable` means enable scaling LR automatically
25 | # or not by default.
26 | # - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
27 | auto_scale_lr = dict(enable=False, base_batch_size=128)
28 |
--------------------------------------------------------------------------------
/configs/votenet/votenet_head-iouloss_8xb8_scannet-3d.py:
--------------------------------------------------------------------------------
1 | _base_ = ['./votenet_8xb8_scannet-3d.py']
2 |
3 | # model settings, add iou loss
4 | model = dict(
5 | bbox_head=dict(
6 | iou_loss=dict(
7 | type='AxisAlignedIoULoss', reduction='sum', loss_weight=10.0 /
8 | 3.0)))
9 |
--------------------------------------------------------------------------------
/data/s3dis/meta_data/class_names.txt:
--------------------------------------------------------------------------------
1 | ceiling
2 | floor
3 | wall
4 | beam
5 | column
6 | window
7 | door
8 | table
9 | chair
10 | sofa
11 | bookcase
12 | board
13 | clutter
14 |
--------------------------------------------------------------------------------
/data/scannet/meta_data/scannet_means.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/data/scannet/meta_data/scannet_means.npz
--------------------------------------------------------------------------------
/dataset-index.yml:
--------------------------------------------------------------------------------
1 | kitti:
2 | # The name of dataset in OpenDataLab referring to
3 | # https://opendatalab.com/KITTI_Object/cli. You can also download it
4 | # by running `odl get ${dataset}` independently
5 | dataset: KITTI_Object
6 | download_root: data
7 | data_root: data/kitti
8 | # Scripts for unzipping datasets
9 | script: tools/dataset_converters/kitti_unzip.sh
10 |
11 | nuscenes:
12 | # The name of dataset in OpenDataLab referring to
13 | # https://opendatalab.com/nuScenes/cli. You can also download it
14 | # by running `odl get ${dataset}` independently
15 | dataset: nuScenes
16 | download_root: data
17 | data_root: data/nuscenes
18 | # Scripts for unzipping datasets
19 | script: tools/dataset_converters/nuscenes_unzip.sh
20 |
21 | semantickitti:
22 | # The name of dataset in OpenDataLab referring to
23 | # https://opendatalab.com/SemanticKITTI/cli. You can also download it
24 | # by running `odl get ${dataset}` independently
25 | dataset: SemanticKITTI
26 | download_root: data
27 | data_root: data/semantickitti
28 | # Scripts for unzipping datasets
29 | script: tools/dataset_converters/semantickitti_unzip.sh
30 |
--------------------------------------------------------------------------------
/demo/data/kitti/000008.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/kitti/000008.bin
--------------------------------------------------------------------------------
/demo/data/kitti/000008.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/kitti/000008.pkl
--------------------------------------------------------------------------------
/demo/data/kitti/000008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/kitti/000008.png
--------------------------------------------------------------------------------
/demo/data/kitti/000008.txt:
--------------------------------------------------------------------------------
1 | 721.5377 0.0 609.5593 44.85728 0.0 721.5377 172.854 0.2163791 0.0 0.0 1.0 0.002745884 0.0 0.0 0.0 1.0
2 |
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800.pkl
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK_LEFT__1532402927647423.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK_LEFT__1532402927647423.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK_RIGHT__1532402927627893.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK_RIGHT__1532402927627893.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_BACK__1532402927637525.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT_LEFT__1532402927604844.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT_LEFT__1532402927604844.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT_RIGHT__1532402927620339.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT_RIGHT__1532402927620339.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT__1532402927612460.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__CAM_FRONT__1532402927612460.jpg
--------------------------------------------------------------------------------
/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__LIDAR_TOP__1532402927647951.pcd.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__LIDAR_TOP__1532402927647951.pcd.bin
--------------------------------------------------------------------------------
/demo/data/scannet/scene0000_00.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/scannet/scene0000_00.bin
--------------------------------------------------------------------------------
/demo/data/sunrgbd/000017.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/sunrgbd/000017.bin
--------------------------------------------------------------------------------
/demo/data/sunrgbd/000017.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/sunrgbd/000017.jpg
--------------------------------------------------------------------------------
/demo/data/sunrgbd/sunrgbd_000017_infos.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/demo/data/sunrgbd/sunrgbd_000017_infos.pkl
--------------------------------------------------------------------------------
/docker/serve/config.properties:
--------------------------------------------------------------------------------
1 | inference_address=http://0.0.0.0:8080
2 | management_address=http://0.0.0.0:8081
3 | metrics_address=http://0.0.0.0:8082
4 | model_store=/home/model-server/model-store
5 | load_models=all
6 |
--------------------------------------------------------------------------------
/docker/serve/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [[ "$1" = "serve" ]]; then
5 | shift 1
6 | torchserve --start --ts-config /home/model-server/config.properties
7 | else
8 | eval "$@"
9 | fi
10 |
11 | # prevent docker exit
12 | tail -f /dev/null
13 |
--------------------------------------------------------------------------------
/docs/en/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/en/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmdet3d-logo.png");
3 | background-size: 182.5px 40px;
4 | height: 40px;
5 | width: 182.5px;
6 | }
7 |
--------------------------------------------------------------------------------
/docs/en/_static/image/mmdet3d-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/docs/en/_static/image/mmdet3d-logo.png
--------------------------------------------------------------------------------
/docs/en/advanced_guides/datasets/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 3
3 |
4 | kitti.md
5 | nuscenes.md
6 | lyft.md
7 | waymo.md
8 | sunrgbd.md
9 | scannet.md
10 | s3dis.md
11 | semantickitti.md
12 |
--------------------------------------------------------------------------------
/docs/en/advanced_guides/index.rst:
--------------------------------------------------------------------------------
1 | Datasets
2 | **************
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | datasets/index.rst
8 |
9 |
10 | Supported Tasks
11 | **************
12 |
13 | .. toctree::
14 | :maxdepth: 1
15 |
16 | supported_tasks/index.rst
17 |
18 |
19 | Customization
20 | **************
21 |
22 | .. toctree::
23 | :maxdepth: 1
24 |
25 | customize_dataset.md
26 | customize_models.md
27 | customize_runtime.md
28 |
--------------------------------------------------------------------------------
/docs/en/advanced_guides/supported_tasks/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 1
3 |
4 | lidar_det3d.md
5 | vision_det3d.md
6 | lidar_sem_seg3d.md
7 |
--------------------------------------------------------------------------------
/docs/en/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to MMDetection3D's documentation!
2 | ==========================================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: Get Started
7 |
8 | get_started.md
9 |
10 | .. toctree::
11 | :maxdepth: 2
12 | :caption: User Guides
13 |
14 | user_guides/index.rst
15 |
16 | .. toctree::
17 | :maxdepth: 2
18 | :caption: Advanced Guides
19 |
20 | advanced_guides/index.rst
21 |
22 | .. toctree::
23 | :maxdepth: 1
24 | :caption: Migrating from MMDetection3D 1.0
25 |
26 | migration.md
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 | :caption: API Reference
31 |
32 | api.rst
33 |
34 | .. toctree::
35 | :maxdepth: 1
36 | :caption: Model Zoo
37 |
38 | model_zoo.md
39 |
40 | .. toctree::
41 | :maxdepth: 1
42 | :caption: Notes
43 |
44 | notes/index.rst
45 |
46 | .. toctree::
47 | :caption: Switch Language
48 |
49 | switch_language.md
50 |
51 | Indices and tables
52 | ==================
53 |
54 | * :ref:`genindex`
55 | * :ref:`search`
56 |
--------------------------------------------------------------------------------
/docs/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/en/notes/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 1
3 |
4 | benchmarks.md
5 | changelog_v1.0.x.md
6 | changelog.md
7 | compatibility.md
8 | faq.md
9 | contribution_guides.md
10 |
--------------------------------------------------------------------------------
/docs/en/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/docs/en/user_guides/index.rst:
--------------------------------------------------------------------------------
1 | Train & Test
2 | **************
3 | .. toctree::
4 | :maxdepth: 1
5 |
6 | config.md
7 | coord_sys_tutorial.md
8 | dataset_prepare.md
9 | data_pipeline.md
10 | train_test.md
11 | inference.md
12 | new_data_model.md
13 |
14 | Useful Tools
15 | ************
16 | .. toctree::
17 | :maxdepth: 1
18 |
19 | useful_tools.md
20 | visualization.md
21 | backends_support.md
22 | model_deployment.md
23 |
--------------------------------------------------------------------------------
/docs/en/user_guides/model_deployment.md:
--------------------------------------------------------------------------------
1 | # Model Deployment
2 |
3 | MMDet3D 1.1 fully relies on [MMDeploy](https://mmdeploy.readthedocs.io/) to deploy models.
  4 | Please stay tuned and this document will be updated soon.
5 |
--------------------------------------------------------------------------------
/docs/zh_cn/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/zh_cn/_static/css/readthedocs.css:
--------------------------------------------------------------------------------
1 | .header-logo {
2 | background-image: url("../image/mmdet3d-logo.png");
3 | background-size: 182.5px 40px;
4 | height: 40px;
5 | width: 182.5px;
6 | }
7 |
--------------------------------------------------------------------------------
/docs/zh_cn/_static/image/mmdet3d-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/docs/zh_cn/_static/image/mmdet3d-logo.png
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/datasets/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 3
3 |
4 | kitti.md
5 | nuscenes.md
6 | lyft.md
7 | waymo.md
8 | sunrgbd.md
9 | scannet.md
10 | s3dis.md
11 | semantickitti.md
12 |
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/index.rst:
--------------------------------------------------------------------------------
1 | 数据集
2 | **************
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | datasets/index.rst
8 |
9 |
10 | 支持的任务
11 | **************
12 |
13 | .. toctree::
14 | :maxdepth: 1
15 |
16 | supported_tasks/index.rst
17 |
18 |
19 | 自定义项目
20 | **************
21 |
22 | .. toctree::
23 | :maxdepth: 1
24 |
25 | customize_dataset.md
26 | customize_models.md
27 | customize_runtime.md
28 |
--------------------------------------------------------------------------------
/docs/zh_cn/advanced_guides/supported_tasks/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 1
3 |
4 | lidar_det3d.md
5 | vision_det3d.md
6 | lidar_sem_seg3d.md
7 |
--------------------------------------------------------------------------------
/docs/zh_cn/index.rst:
--------------------------------------------------------------------------------
1 | 欢迎来到 MMDetection3D 文档!
2 | ==========================================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 | :caption: 开始你的第一步
7 |
8 | get_started.md
9 |
10 | .. toctree::
11 | :maxdepth: 2
12 | :caption: 使用指南
13 |
14 | user_guides/index.rst
15 |
16 | .. toctree::
17 | :maxdepth: 2
18 | :caption: 进阶教程
19 |
20 | advanced_guides/index.rst
21 |
22 | .. toctree::
23 | :maxdepth: 1
24 | :caption: 迁移版本
25 |
26 | migration.md
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 | :caption: 接口文档(英文)
31 |
32 | api.rst
33 |
34 | .. toctree::
35 | :maxdepth: 1
36 | :caption: 模型仓库
37 |
38 | model_zoo.md
39 |
40 | .. toctree::
41 | :maxdepth: 1
42 | :caption: 说明
43 |
44 | notes/index.rst
45 |
46 | .. toctree::
47 | :caption: 语言切换
48 |
49 | switch_language.md
50 |
51 | Indices and tables
52 | ==================
53 |
54 | * :ref:`genindex`
55 | * :ref:`search`
56 |
--------------------------------------------------------------------------------
/docs/zh_cn/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/changelog.md:
--------------------------------------------------------------------------------
1 | # v1.1 变更日志
2 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/changelog_v1.0.x.md:
--------------------------------------------------------------------------------
1 | # v1.0.x 变更日志
2 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/compatibility.md:
--------------------------------------------------------------------------------
1 | # 兼容性
2 |
--------------------------------------------------------------------------------
/docs/zh_cn/notes/index.rst:
--------------------------------------------------------------------------------
1 | .. toctree::
2 | :maxdepth: 3
3 |
4 | benchmarks.md
5 | changelog_v1.0.x.md
6 | changelog.md
7 | compatibility.md
8 | faq.md
9 |
--------------------------------------------------------------------------------
/docs/zh_cn/switch_language.md:
--------------------------------------------------------------------------------
1 | ## English
2 |
3 | ## 简体中文
4 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/index.rst:
--------------------------------------------------------------------------------
1 | 训练和测试
2 | **************
3 | .. toctree::
4 | :maxdepth: 1
5 |
6 | config.md
7 | coord_sys_tutorial.md
8 | dataset_prepare.md
9 | data_pipeline.md
10 | train_test.md
11 | inference.md
12 | new_data_model.md
13 |
14 | 实用工具
15 | ************
16 | .. toctree::
17 | :maxdepth: 1
18 |
19 | useful_tools.md
20 | visualization.md
21 | backends_support.md
22 | model_deployment.md
23 |
--------------------------------------------------------------------------------
/docs/zh_cn/user_guides/model_deployment.md:
--------------------------------------------------------------------------------
1 | # 模型部署(待更新)
2 |
  3 | MMDet3D 1.1 完全基于 [MMDeploy](https://mmdeploy.readthedocs.io/) 来部署模型。
4 | 我们将在下一个版本完善这个文档。
5 |
--------------------------------------------------------------------------------
/mmdet3d/apis/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .inference import (convert_SyncBN, inference_detector,
3 | inference_mono_3d_detector,
4 | inference_multi_modality_detector, inference_segmentor,
5 | init_model)
6 | from .inferencers import (Base3DInferencer, LidarDet3DInferencer,
7 | LidarSeg3DInferencer, MonoDet3DInferencer,
8 | MultiModalityDet3DInferencer)
9 |
10 | __all__ = [
11 | 'inference_detector', 'init_model', 'inference_mono_3d_detector',
12 | 'convert_SyncBN', 'inference_multi_modality_detector',
13 | 'inference_segmentor', 'Base3DInferencer', 'MonoDet3DInferencer',
14 | 'LidarDet3DInferencer', 'LidarSeg3DInferencer',
15 | 'MultiModalityDet3DInferencer'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmdet3d/apis/inferencers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_3d_inferencer import Base3DInferencer
3 | from .lidar_det3d_inferencer import LidarDet3DInferencer
4 | from .lidar_seg3d_inferencer import LidarSeg3DInferencer
5 | from .mono_det3d_inferencer import MonoDet3DInferencer
6 | from .multi_modality_det3d_inferencer import MultiModalityDet3DInferencer
7 |
8 | __all__ = [
9 | 'Base3DInferencer', 'MonoDet3DInferencer', 'LidarDet3DInferencer',
10 | 'LidarSeg3DInferencer', 'MultiModalityDet3DInferencer'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/default_runtime.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.hooks.checkpoint_hook import CheckpointHook
3 | from mmengine.hooks.iter_timer_hook import IterTimerHook
4 | from mmengine.hooks.logger_hook import LoggerHook
5 | from mmengine.hooks.param_scheduler_hook import ParamSchedulerHook
6 | from mmengine.hooks.sampler_seed_hook import DistSamplerSeedHook
7 | from mmengine.runner.log_processor import LogProcessor
8 |
9 | from mmdet3d.engine.hooks.visualization_hook import Det3DVisualizationHook
10 |
11 | default_scope = 'mmdet3d'
12 |
13 | default_hooks = dict(
14 | timer=dict(type=IterTimerHook),
15 | logger=dict(type=LoggerHook, interval=50),
16 | param_scheduler=dict(type=ParamSchedulerHook),
17 | checkpoint=dict(type=CheckpointHook, interval=-1),
18 | sampler_seed=dict(type=DistSamplerSeedHook),
19 | visualization=dict(type=Det3DVisualizationHook))
20 |
21 | env_cfg = dict(
22 | cudnn_benchmark=False,
23 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
24 | dist_cfg=dict(backend='nccl'),
25 | )
26 |
27 | log_processor = dict(type=LogProcessor, window_size=50, by_epoch=True)
28 |
29 | log_level = 'INFO'
30 | load_from = None
31 | resume = False
32 |
33 | # TODO: support auto scaling lr
34 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/mmdet_schedule_1x.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import LinearLR, MultiStepLR
4 | from mmengine.runner.loops import EpochBasedTrainLoop, TestLoop, ValLoop
5 | from torch.optim.sgd import SGD
6 |
7 | # training schedule for 1x
8 | train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=12, val_interval=1)
9 | val_cfg = dict(type=ValLoop)
10 | test_cfg = dict(type=TestLoop)
11 |
12 | # learning rate
13 | param_scheduler = [
14 | dict(type=LinearLR, start_factor=0.001, by_epoch=False, begin=0, end=500),
15 | dict(
16 | type=MultiStepLR,
17 | begin=0,
18 | end=12,
19 | by_epoch=True,
20 | milestones=[8, 11],
21 | gamma=0.1)
22 | ]
23 |
24 | # optimizer
25 | optim_wrapper = dict(
26 | type=OptimWrapper,
27 | optimizer=dict(type=SGD, lr=0.02, momentum=0.9, weight_decay=0.0001))
28 |
29 | # Default setting for scaling LR automatically
30 | # - `enable` means enable scaling LR automatically
31 | # or not by default.
32 | # - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
33 | auto_scale_lr = dict(enable=False, base_batch_size=16)
34 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/schedule_3x.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import MultiStepLR
4 | from mmengine.runner.loops import EpochBasedTrainLoop, TestLoop, ValLoop
5 | from torch.optim.adamw import AdamW
6 |
7 | # optimizer
8 | # This schedule is mainly used by models on indoor dataset,
9 | # e.g., VoteNet on SUNRGBD and ScanNet
10 | lr = 0.008 # max learning rate
11 | optim_wrapper = dict(
12 | type=OptimWrapper,
13 | optimizer=dict(type=AdamW, lr=lr, weight_decay=0.01),
14 | clip_grad=dict(max_norm=10, norm_type=2),
15 | )
16 |
17 | # training schedule for 3x
18 | train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=36, val_interval=1)
19 | val_cfg = dict(type=ValLoop)
20 | test_cfg = dict(type=TestLoop)
21 |
22 | # learning rate
23 | param_scheduler = [
24 | dict(
25 | type=MultiStepLR,
26 | begin=0,
27 | end=36,
28 | by_epoch=True,
29 | milestones=[24, 32],
30 | gamma=0.1)
31 | ]
32 |
33 | # Default setting for scaling LR automatically
34 | # - `enable` means enable scaling LR automatically
35 | # or not by default.
36 | # - `base_batch_size` = (4 GPUs) x (8 samples per GPU).
37 | auto_scale_lr = dict(enable=False, base_batch_size=32)
38 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/seg_cosine_100e.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR
4 | from torch.optim.sgd import SGD
5 |
6 | # optimizer
7 | # This schedule is mainly used on S3DIS dataset in segmentation task
8 | optim_wrapper = dict(
9 | type=OptimWrapper,
10 | optimizer=dict(type=SGD, lr=0.1, momentum=0.9, weight_decay=0.001),
11 | clip_grad=None)
12 |
13 | param_scheduler = [
14 | dict(
15 | type=CosineAnnealingLR,
16 | T_max=100,
17 | eta_min=1e-5,
18 | by_epoch=True,
19 | begin=0,
20 | end=100)
21 | ]
22 |
23 | # runtime settings
24 | train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
25 | val_cfg = dict()
26 | test_cfg = dict()
27 |
28 | # Default setting for scaling LR automatically
29 | # - `enable` means enable scaling LR automatically
30 | # or not by default.
31 | # - `base_batch_size` = (4 GPUs) x (32 samples per GPU).
32 | auto_scale_lr = dict(enable=False, base_batch_size=128)
33 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/seg_cosine_150e.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR
4 | from torch.optim.sgd import SGD
5 |
6 | # optimizer
7 | # This schedule is mainly used on S3DIS dataset in segmentation task
8 | optim_wrapper = dict(
9 | type=OptimWrapper,
10 | optimizer=dict(type=SGD, lr=0.2, momentum=0.9, weight_decay=0.0001),
11 | clip_grad=None)
12 |
13 | param_scheduler = [
14 | dict(
15 | type=CosineAnnealingLR,
16 | T_max=150,
17 | eta_min=0.002,
18 | by_epoch=True,
19 | begin=0,
20 | end=150)
21 | ]
22 |
23 | # runtime settings
24 | train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=1)
25 | val_cfg = dict()
26 | test_cfg = dict()
27 |
28 | # Default setting for scaling LR automatically
29 | # - `enable` means enable scaling LR automatically
30 | # or not by default.
31 | # - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
32 | auto_scale_lr = dict(enable=False, base_batch_size=64)
33 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/seg_cosine_200e.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR
4 | from torch.optim.adam import Adam
5 |
6 | # optimizer
7 | # This schedule is mainly used on S3DIS dataset in segmentation task
8 | optim_wrapper = dict(
9 | type=OptimWrapper,
10 | optimizer=dict(type=Adam, lr=0.001, weight_decay=0.01),
11 | clip_grad=None)
12 |
13 | param_scheduler = [
14 | dict(
15 | type=CosineAnnealingLR,
16 | T_max=200,
17 | eta_min=1e-5,
18 | by_epoch=True,
19 | begin=0,
20 | end=200)
21 | ]
22 |
23 | # runtime settings
24 | train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1)
25 | val_cfg = dict()
26 | test_cfg = dict()
27 |
28 | # Default setting for scaling LR automatically
29 | # - `enable` means enable scaling LR automatically
30 | # or not by default.
31 | # - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
32 | auto_scale_lr = dict(enable=False, base_batch_size=32)
33 |
--------------------------------------------------------------------------------
/mmdet3d/configs/_base_/schedules/seg_cosine_50e.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
3 | from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR
4 | from torch.optim.adam import Adam
5 |
6 | # optimizer
7 | # This schedule is mainly used on S3DIS dataset in segmentation task
8 | optim_wrapper = dict(
9 | type=OptimWrapper,
10 | optimizer=dict(type=Adam, lr=0.001, weight_decay=0.001),
11 | clip_grad=None)
12 |
13 | param_scheduler = [
14 | dict(
15 | type=CosineAnnealingLR,
16 | T_max=50,
17 | eta_min=1e-5,
18 | by_epoch=True,
19 | begin=0,
20 | end=50)
21 | ]
22 |
23 | # runtime settings
24 | train_cfg = dict(by_epoch=True, max_epochs=50, val_interval=1)
25 | val_cfg = dict()
26 | test_cfg = dict()
27 |
28 | # Default setting for scaling LR automatically
29 | # - `enable` means enable scaling LR automatically
30 | # or not by default.
31 | # - `base_batch_size` = (2 GPUs) x (16 samples per GPU).
32 | auto_scale_lr = dict(enable=False, base_batch_size=32)
33 |
--------------------------------------------------------------------------------
/mmdet3d/configs/votenet/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
--------------------------------------------------------------------------------
/mmdet3d/engine/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .hooks import BenchmarkHook, Det3DVisualizationHook
3 |
4 | __all__ = ['Det3DVisualizationHook', 'BenchmarkHook']
5 |
--------------------------------------------------------------------------------
/mmdet3d/engine/hooks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .benchmark_hook import BenchmarkHook
3 | from .disable_object_sample_hook import DisableObjectSampleHook
4 | from .visualization_hook import Det3DVisualizationHook
5 |
6 | __all__ = [
7 | 'Det3DVisualizationHook', 'BenchmarkHook', 'DisableObjectSampleHook'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmdet3d/evaluation/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .indoor_eval import (average_precision, eval_det_cls, eval_map_recall,
3 | indoor_eval)
4 | from .instance_seg_eval import (aggregate_predictions, instance_seg_eval,
5 | rename_gt)
6 | from .kitti_utils import do_eval, kitti_eval, kitti_eval_coco_style
7 | from .lyft_eval import (get_classwise_aps, get_single_class_aps, load_lyft_gts,
8 | load_lyft_predictions, lyft_eval)
9 | from .panoptic_seg_eval import panoptic_seg_eval
10 | from .scannet_utils import evaluate_matches, scannet_eval
11 | from .seg_eval import fast_hist, get_acc, get_acc_cls, per_class_iou, seg_eval
12 |
13 | __all__ = [
14 | 'average_precision', 'eval_det_cls', 'eval_map_recall', 'indoor_eval',
15 | 'aggregate_predictions', 'rename_gt', 'instance_seg_eval', 'load_lyft_gts',
16 | 'load_lyft_predictions', 'lyft_eval', 'get_classwise_aps',
17 | 'get_single_class_aps', 'fast_hist', 'per_class_iou', 'get_acc',
18 | 'get_acc_cls', 'seg_eval', 'kitti_eval', 'kitti_eval_coco_style',
19 | 'scannet_eval', 'evaluate_matches', 'do_eval', 'panoptic_seg_eval'
20 | ]
21 |
--------------------------------------------------------------------------------
/mmdet3d/evaluation/functional/kitti_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .eval import do_eval, eval_class, kitti_eval, kitti_eval_coco_style
3 |
4 | __all__ = ['kitti_eval', 'kitti_eval_coco_style', 'do_eval', 'eval_class']
5 |
--------------------------------------------------------------------------------
/mmdet3d/evaluation/functional/scannet_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .evaluate_semantic_instance import evaluate_matches, scannet_eval
3 |
4 | __all__ = ['scannet_eval', 'evaluate_matches']
5 |
--------------------------------------------------------------------------------
/mmdet3d/evaluation/functional/waymo_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 |
3 | from .prediction_to_waymo import Prediction2Waymo
4 |
5 | __all__ = ['Prediction2Waymo']
6 |
--------------------------------------------------------------------------------
/mmdet3d/evaluation/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .indoor_metric import IndoorMetric # noqa: F401,F403
3 | from .instance_seg_metric import InstanceSegMetric # noqa: F401,F403
4 | from .kitti_metric import KittiMetric # noqa: F401,F403
5 | from .lyft_metric import LyftMetric # noqa: F401,F403
6 | from .nuscenes_metric import NuScenesMetric # noqa: F401,F403
7 | from .panoptic_seg_metric import PanopticSegMetric # noqa: F401,F403
8 | from .seg_metric import SegMetric # noqa: F401,F403
9 | from .waymo_metric import WaymoMetric # noqa: F401,F403
10 |
11 | __all__ = [
12 | 'KittiMetric', 'NuScenesMetric', 'IndoorMetric', 'LyftMetric', 'SegMetric',
13 | 'InstanceSegMetric', 'WaymoMetric', 'PanopticSegMetric'
14 | ]
15 |
--------------------------------------------------------------------------------
/mmdet3d/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet3d.models.layers.fusion_layers import * # noqa: F401,F403
3 | from .backbones import * # noqa: F401,F403
4 | from .data_preprocessors import * # noqa: F401,F403
5 | from .decode_heads import * # noqa: F401,F403
6 | from .dense_heads import * # noqa: F401,F403
7 | from .detectors import * # noqa: F401,F403
8 | from .layers import * # noqa: F401,F403
9 | from .losses import * # noqa: F401,F403
10 | from .middle_encoders import * # noqa: F401,F403
11 | from .necks import * # noqa: F401,F403
12 | from .roi_heads import * # noqa: F401,F403
13 | from .segmentors import * # noqa: F401,F403
14 | from .test_time_augs import * # noqa: F401,F403
15 | from .utils import * # noqa: F401,F403
16 | from .voxel_encoders import * # noqa: F401,F403
17 |
--------------------------------------------------------------------------------
/mmdet3d/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.backbones import SSDVGG, HRNet, ResNet, ResNetV1d, ResNeXt
3 |
4 | from .cylinder3d import Asymm3DSpconv
5 | from .dgcnn import DGCNNBackbone
6 | from .dla import DLANet
7 | from .mink_resnet import MinkResNet
8 | from .minkunet_backbone import MinkUNetBackbone
9 | from .multi_backbone import MultiBackbone
10 | from .nostem_regnet import NoStemRegNet
11 | from .pointnet2_sa_msg import PointNet2SAMSG
12 | from .pointnet2_sa_ssg import PointNet2SASSG
13 | from .second import SECOND
14 | from .spvcnn_backone import MinkUNetBackboneV2, SPVCNNBackbone
15 |
16 | __all__ = [
17 | 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'NoStemRegNet',
18 | 'SECOND', 'DGCNNBackbone', 'PointNet2SASSG', 'PointNet2SAMSG',
19 | 'MultiBackbone', 'DLANet', 'MinkResNet', 'Asymm3DSpconv',
20 | 'MinkUNetBackbone', 'SPVCNNBackbone', 'MinkUNetBackboneV2'
21 | ]
22 |
--------------------------------------------------------------------------------
/mmdet3d/models/data_preprocessors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .data_preprocessor import Det3DDataPreprocessor
3 |
4 | __all__ = ['Det3DDataPreprocessor']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/decode_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .cylinder3d_head import Cylinder3DHead
3 | from .decode_head import Base3DDecodeHead
4 | from .dgcnn_head import DGCNNHead
5 | from .minkunet_head import MinkUNetHead
6 | from .paconv_head import PAConvHead
7 | from .pointnet2_head import PointNet2Head
8 |
9 | __all__ = [
10 | 'PointNet2Head', 'DGCNNHead', 'PAConvHead', 'Cylinder3DHead',
11 | 'Base3DDecodeHead', 'MinkUNetHead'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet3d/models/detectors/ssd3dnet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet3d.registry import MODELS
3 | from .votenet import VoteNet
4 |
5 |
@MODELS.register_module()
class SSD3DNet(VoteNet):
    """Single-stage 3DSSD detector.

    3DSSD reuses the VoteNet training/testing pipeline wholesale; only the
    configured backbone/head differ.

    Paper: https://arxiv.org/abs/2002.10187.pdf
    """

    def __init__(self,
                 backbone,
                 bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=None,
                 **kwargs):
        # Forward everything to VoteNet unchanged.
        super().__init__(
            backbone=backbone,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg,
            **kwargs)
27 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/dgcnn_modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .dgcnn_fa_module import DGCNNFAModule
3 | from .dgcnn_fp_module import DGCNNFPModule
4 | from .dgcnn_gf_module import DGCNNGFModule
5 |
6 | __all__ = ['DGCNNFAModule', 'DGCNNFPModule', 'DGCNNGFModule']
7 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/fusion_layers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .coord_transform import (apply_3d_transformation, bbox_2d_transform,
3 | coord_2d_transform)
4 | from .point_fusion import PointFusion
5 | from .vote_fusion import VoteFusion
6 |
7 | __all__ = [
8 | 'PointFusion', 'VoteFusion', 'apply_3d_transformation',
9 | 'bbox_2d_transform', 'coord_2d_transform'
10 | ]
11 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/paconv/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .paconv import PAConv, PAConvCUDA
3 |
4 | __all__ = ['PAConv', 'PAConvCUDA']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/pointnet_modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import build_sa_module
3 | from .paconv_sa_module import (PAConvCUDASAModule, PAConvCUDASAModuleMSG,
4 | PAConvSAModule, PAConvSAModuleMSG)
5 | from .point_fp_module import PointFPModule
6 | from .point_sa_module import PointSAModule, PointSAModuleMSG
7 | from .stack_point_sa_module import StackedSAModuleMSG
8 |
9 | __all__ = [
10 | 'build_sa_module', 'PointSAModuleMSG', 'PointSAModule', 'PointFPModule',
11 | 'PAConvSAModule', 'PAConvSAModuleMSG', 'PAConvCUDASAModule',
12 | 'PAConvCUDASAModuleMSG', 'StackedSAModuleMSG'
13 | ]
14 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/spconv/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .overwrite_spconv import register_spconv2

# Probe for the optional ``spconv`` package. Only spconv 2.x is registered
# via ``register_spconv2``; any other outcome leaves the flag False.
try:
    import spconv
except ImportError:
    # spconv is not installed at all.
    IS_SPCONV2_AVAILABLE = False
else:
    # NOTE(review): ``__version__ >= '2.0.0'`` is a lexicographic string
    # comparison, so a hypothetical major version >= 10 would compare as
    # smaller than '2.0.0'; fine for current releases — confirm upstream.
    if hasattr(spconv, '__version__') and spconv.__version__ >= '2.0.0':
        IS_SPCONV2_AVAILABLE = register_spconv2()
    else:
        # An spconv 1.x install is present; it is not registered here.
        IS_SPCONV2_AVAILABLE = False

__all__ = ['IS_SPCONV2_AVAILABLE']
15 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/spconv/overwrite_spconv/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .write_spconv2 import register_spconv2
3 |
4 | __all__ = ['register_spconv2']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/torchsparse/__init__.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
from .torchsparse_wrapper import register_torchsparse

# Probe for the optional ``torchsparse`` package; when importable, attempt
# to register its layers and record whether registration succeeded.
try:
    import torchsparse  # noqa
except ImportError:
    # torchsparse is not installed; downstream code must not use it.
    IS_TORCHSPARSE_AVAILABLE = False
else:
    # ``register_torchsparse`` returns False if its own imports fail.
    IS_TORCHSPARSE_AVAILABLE = register_torchsparse()

__all__ = ['IS_TORCHSPARSE_AVAILABLE']
12 |
--------------------------------------------------------------------------------
/mmdet3d/models/layers/torchsparse/torchsparse_wrapper.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch.nn as nn
3 | from mmengine.registry import MODELS
4 |
5 |
def register_torchsparse() -> bool:
    """Register torchsparse layers into the ``MODELS`` registry.

    Returns:
        bool: True when torchsparse is importable and its modules were
        registered; False otherwise.
    """
    try:
        from torchsparse.nn import (BatchNorm, Conv3d, GroupNorm, LeakyReLU,
                                    ReLU)
        from torchsparse.nn.utils import fapply
        from torchsparse.tensor import SparseTensor
    except ImportError:
        # torchsparse is an optional dependency; report absence quietly.
        return False

    class SyncBatchNorm(nn.SyncBatchNorm):
        """SyncBN variant that applies the norm to a ``SparseTensor``."""

        def forward(self, input: SparseTensor) -> SparseTensor:
            # ``fapply`` unwraps the sparse tensor's features, applies the
            # dense forward, and rewraps the result.
            return fapply(input, super().forward)

    # Expose the torchsparse layers under registry names used by configs.
    MODELS._register_module(Conv3d, 'TorchSparseConv3d')
    MODELS._register_module(BatchNorm, 'TorchSparseBN')
    MODELS._register_module(SyncBatchNorm, 'TorchSparseSyncBN')
    MODELS._register_module(GroupNorm, 'TorchSparseGN')
    MODELS._register_module(ReLU, 'TorchSparseReLU')
    MODELS._register_module(LeakyReLU, 'TorchSparseLeakyReLU')
    return True
29 |
--------------------------------------------------------------------------------
/mmdet3d/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.losses import FocalLoss, SmoothL1Loss, binary_cross_entropy
3 |
4 | from .axis_aligned_iou_loss import AxisAlignedIoULoss, axis_aligned_iou_loss
5 | from .chamfer_distance import ChamferDistance, chamfer_distance
6 | from .lovasz_loss import LovaszLoss
7 | from .multibin_loss import MultiBinLoss
8 | from .paconv_regularization_loss import PAConvRegularizationLoss
9 | from .rotated_iou_loss import RotatedIoU3DLoss, rotated_iou_3d_loss
10 | from .uncertain_smooth_l1_loss import UncertainL1Loss, UncertainSmoothL1Loss
11 |
12 | __all__ = [
13 | 'FocalLoss', 'SmoothL1Loss', 'binary_cross_entropy', 'ChamferDistance',
14 | 'chamfer_distance', 'axis_aligned_iou_loss', 'AxisAlignedIoULoss',
15 | 'PAConvRegularizationLoss', 'UncertainL1Loss', 'UncertainSmoothL1Loss',
16 | 'MultiBinLoss', 'RotatedIoU3DLoss', 'rotated_iou_3d_loss', 'LovaszLoss'
17 | ]
18 |
--------------------------------------------------------------------------------
/mmdet3d/models/middle_encoders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .pillar_scatter import PointPillarsScatter
3 | from .sparse_encoder import SparseEncoder, SparseEncoderSASSD
4 | from .sparse_unet import SparseUNet
5 | from .voxel_set_abstraction import VoxelSetAbstraction
6 |
7 | __all__ = [
8 | 'PointPillarsScatter', 'SparseEncoder', 'SparseEncoderSASSD', 'SparseUNet',
9 | 'VoxelSetAbstraction'
10 | ]
11 |
--------------------------------------------------------------------------------
/mmdet3d/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.necks.fpn import FPN
3 |
4 | from .dla_neck import DLANeck
5 | from .imvoxel_neck import IndoorImVoxelNeck, OutdoorImVoxelNeck
6 | from .pointnet2_fp_neck import PointNetFPNeck
7 | from .second_fpn import SECONDFPN
8 |
9 | __all__ = [
10 | 'FPN', 'SECONDFPN', 'OutdoorImVoxelNeck', 'PointNetFPNeck', 'DLANeck',
11 | 'IndoorImVoxelNeck'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet3d/models/roi_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_3droi_head import Base3DRoIHead
3 | from .bbox_heads import PartA2BboxHead
4 | from .h3d_roi_head import H3DRoIHead
5 | from .mask_heads import PointwiseSemanticHead, PrimitiveHead
6 | from .part_aggregation_roi_head import PartAggregationROIHead
7 | from .point_rcnn_roi_head import PointRCNNRoIHead
8 | from .pv_rcnn_roi_head import PVRCNNRoiHead
9 | from .roi_extractors import Single3DRoIAwareExtractor, SingleRoIExtractor
10 |
11 | __all__ = [
12 | 'Base3DRoIHead', 'PartAggregationROIHead', 'PointwiseSemanticHead',
13 | 'Single3DRoIAwareExtractor', 'PartA2BboxHead', 'SingleRoIExtractor',
14 | 'H3DRoIHead', 'PrimitiveHead', 'PointRCNNRoIHead', 'PVRCNNRoiHead'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmdet3d/models/roi_heads/bbox_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.roi_heads.bbox_heads import (BBoxHead, ConvFCBBoxHead,
3 | DoubleConvFCBBoxHead,
4 | Shared2FCBBoxHead,
5 | Shared4Conv1FCBBoxHead)
6 |
7 | from .h3d_bbox_head import H3DBboxHead
8 | from .parta2_bbox_head import PartA2BboxHead
9 | from .point_rcnn_bbox_head import PointRCNNBboxHead
10 | from .pv_rcnn_bbox_head import PVRCNNBBoxHead
11 |
12 | __all__ = [
13 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
14 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'PartA2BboxHead',
15 | 'H3DBboxHead', 'PointRCNNBboxHead', 'PVRCNNBBoxHead'
16 | ]
17 |
--------------------------------------------------------------------------------
/mmdet3d/models/roi_heads/mask_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .foreground_segmentation_head import ForegroundSegmentationHead
3 | from .pointwise_semantic_head import PointwiseSemanticHead
4 | from .primitive_head import PrimitiveHead
5 |
6 | __all__ = [
7 | 'PointwiseSemanticHead', 'PrimitiveHead', 'ForegroundSegmentationHead'
8 | ]
9 |
--------------------------------------------------------------------------------
/mmdet3d/models/roi_heads/roi_extractors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor
3 |
4 | from .batch_roigridpoint_extractor import Batch3DRoIGridExtractor
5 | from .single_roiaware_extractor import Single3DRoIAwareExtractor
6 | from .single_roipoint_extractor import Single3DRoIPointExtractor
7 |
8 | __all__ = [
9 | 'SingleRoIExtractor', 'Single3DRoIAwareExtractor',
10 | 'Single3DRoIPointExtractor', 'Batch3DRoIGridExtractor'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmdet3d/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base import Base3DSegmentor
3 | from .cylinder3d import Cylinder3D
4 | from .encoder_decoder import EncoderDecoder3D
5 | from .minkunet import MinkUNet
6 | from .seg3d_tta import Seg3DTTAModel
7 |
8 | __all__ = [
9 | 'Base3DSegmentor', 'EncoderDecoder3D', 'Cylinder3D', 'MinkUNet',
10 | 'Seg3DTTAModel'
11 | ]
12 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/anchor/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .anchor_3d_generator import (AlignedAnchor3DRangeGenerator,
3 | AlignedAnchor3DRangeGeneratorPerCls,
4 | Anchor3DRangeGenerator)
5 | from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
6 | build_anchor_generator, build_prior_generator)
7 |
8 | __all__ = [
9 | 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator',
10 | 'build_prior_generator', 'AlignedAnchor3DRangeGeneratorPerCls',
11 | 'build_anchor_generator', 'ANCHOR_GENERATORS', 'PRIOR_GENERATORS'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/anchor/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import warnings
3 | from typing import Any
4 |
5 | from mmdet3d.registry import TASK_UTILS
6 | from mmdet3d.utils import ConfigType
7 |
8 | PRIOR_GENERATORS = TASK_UTILS
9 |
10 | ANCHOR_GENERATORS = TASK_UTILS
11 |
12 |
def build_prior_generator(cfg: ConfigType, default_args=None) -> Any:
    """Deprecated builder of prior generator.

    Kept for backward compatibility; emits a deprecation warning and
    forwards to ``TASK_UTILS.build``.
    """
    warnings.warn(
        '``build_prior_generator`` would be deprecated soon, please use '
        '``mmdet3d.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)
18 |
19 |
def build_anchor_generator(cfg: ConfigType, default_args=None) -> Any:
    """Deprecated builder of anchor generator.

    Kept for backward compatibility; emits a deprecation warning and
    forwards to ``TASK_UTILS.build``.
    """
    warnings.warn(
        '``build_anchor_generator`` would be deprecated soon, please use '
        '``mmdet3d.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)
25 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/assigners/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .max_3d_iou_assigner import Max3DIoUAssigner
3 |
4 | __all__ = ['Max3DIoUAssigner']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import warnings
3 | from typing import Any
4 |
5 | from mmdet3d.registry import TASK_UTILS
6 | from mmdet3d.utils.typing_utils import ConfigType
7 |
8 | BBOX_ASSIGNERS = TASK_UTILS
9 | BBOX_SAMPLERS = TASK_UTILS
10 | BBOX_CODERS = TASK_UTILS
11 |
12 |
def build_assigner(cfg: ConfigType, **default_args) -> Any:
    """Builder of box assigner."""
    warnings.warn('``build_assigner`` would be deprecated soon, please use '
                  '``mmdet3d.registry.TASK_UTILS.build()`` ')
    # Keyword args are collected into a dict and forwarded as the
    # registry's ``default_args``.
    return TASK_UTILS.build(cfg, default_args=default_args)
18 |
19 |
def build_sampler(cfg: ConfigType, **default_args) -> Any:
    """Builder of box sampler."""
    warnings.warn('``build_sampler`` would be deprecated soon, please use '
                  '``mmdet3d.registry.TASK_UTILS.build()`` ')
    # Keyword args are collected into a dict and forwarded as the
    # registry's ``default_args``.
    return TASK_UTILS.build(cfg, default_args=default_args)
25 |
26 |
def build_bbox_coder(cfg: ConfigType, **default_args) -> Any:
    """Builder of box coder."""
    warnings.warn('``build_bbox_coder`` would be deprecated soon, please use '
                  '``mmdet3d.registry.TASK_UTILS.build()`` ')
    # Keyword args are collected into a dict and forwarded as the
    # registry's ``default_args``.
    return TASK_UTILS.build(cfg, default_args=default_args)
32 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/coders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .anchor_free_bbox_coder import AnchorFreeBBoxCoder
3 | from .centerpoint_bbox_coders import CenterPointBBoxCoder
4 | from .delta_xyzwhlr_bbox_coder import DeltaXYZWLHRBBoxCoder
5 | from .fcos3d_bbox_coder import FCOS3DBBoxCoder
6 | from .groupfree3d_bbox_coder import GroupFree3DBBoxCoder
7 | from .monoflex_bbox_coder import MonoFlexCoder
8 | from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder
9 | from .pgd_bbox_coder import PGDBBoxCoder
10 | from .point_xyzwhlr_bbox_coder import PointXYZWHLRBBoxCoder
11 | from .smoke_bbox_coder import SMOKECoder
12 |
13 | __all__ = [
14 | 'DeltaXYZWLHRBBoxCoder', 'PartialBinBasedBBoxCoder',
15 | 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder', 'GroupFree3DBBoxCoder',
16 | 'PointXYZWHLRBBoxCoder', 'FCOS3DBBoxCoder', 'PGDBBoxCoder', 'SMOKECoder',
17 | 'MonoFlexCoder'
18 | ]
19 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/samplers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmdet.models.task_modules.samplers import (BaseSampler, CombinedSampler,
3 | InstanceBalancedPosSampler,
4 | IoUBalancedNegSampler,
5 | OHEMSampler, RandomSampler,
6 | SamplingResult)
7 |
8 | from .iou_neg_piecewise_sampler import IoUNegPiecewiseSampler
9 | from .pseudosample import PseudoSampler
10 |
11 | __all__ = [
12 | 'BaseSampler', 'PseudoSampler', 'RandomSampler',
13 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
14 | 'OHEMSampler', 'SamplingResult', 'IoUNegPiecewiseSampler'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmdet3d/models/task_modules/voxel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .voxel_generator import VoxelGenerator
3 |
4 | __all__ = ['VoxelGenerator']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/test_time_augs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .merge_augs import merge_aug_bboxes_3d
3 |
4 | __all__ = ['merge_aug_bboxes_3d']
5 |
--------------------------------------------------------------------------------
/mmdet3d/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .add_prefix import add_prefix
3 | from .clip_sigmoid import clip_sigmoid
4 | from .edge_indices import get_edge_indices
5 | from .gaussian import (draw_heatmap_gaussian, ellip_gaussian2D, gaussian_2d,
6 | gaussian_radius, get_ellip_gaussian_2D)
7 | from .gen_keypoints import get_keypoints
8 | from .handle_objs import filter_outside_objs, handle_proj_objs
9 |
10 | __all__ = [
11 | 'clip_sigmoid', 'get_edge_indices', 'filter_outside_objs',
12 | 'handle_proj_objs', 'get_keypoints', 'gaussian_2d',
13 | 'draw_heatmap_gaussian', 'gaussian_radius', 'get_ellip_gaussian_2D',
14 | 'ellip_gaussian2D', 'add_prefix'
15 | ]
16 |
--------------------------------------------------------------------------------
/mmdet3d/models/utils/add_prefix.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
def add_prefix(inputs: dict, prefix: str) -> dict:
    """Prepend ``prefix`` (dot-separated) to every key of a dict.

    Args:
        inputs (dict): The input dict with str keys.
        prefix (str): The prefix to add.

    Returns:
        dict: A new dict whose keys are ``'<prefix>.<key>'``; values are
        passed through unchanged.
    """
    return {f'{prefix}.{key}': value for key, value in inputs.items()}
19 |
--------------------------------------------------------------------------------
/mmdet3d/models/utils/clip_sigmoid.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import torch
3 | from torch import Tensor
4 |
5 |
6 | def clip_sigmoid(x: Tensor, eps: float = 1e-4) -> Tensor:
7 | """Sigmoid function for input feature.
8 |
9 | Args:
10 | x (Tensor): Input feature map with the shape of [B, N, H, W].
11 | eps (float): Lower bound of the range to be clamped to.
12 | Defaults to 1e-4.
13 |
14 | Returns:
15 | Tensor: Feature map after sigmoid.
16 | """
17 | y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps)
18 | return y
19 |
--------------------------------------------------------------------------------
/mmdet3d/models/voxel_encoders/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .pillar_encoder import DynamicPillarFeatureNet, PillarFeatureNet
3 | from .voxel_encoder import (DynamicSimpleVFE, DynamicVFE, HardSimpleVFE,
4 | HardVFE, SegVFE)
5 |
6 | __all__ = [
7 | 'PillarFeatureNet', 'DynamicPillarFeatureNet', 'HardVFE', 'DynamicVFE',
8 | 'HardSimpleVFE', 'DynamicSimpleVFE', 'SegVFE'
9 | ]
10 |
--------------------------------------------------------------------------------
/mmdet3d/structures/bbox_3d/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_box3d import BaseInstance3DBoxes
3 | from .box_3d_mode import Box3DMode
4 | from .cam_box3d import CameraInstance3DBoxes
5 | from .coord_3d_mode import Coord3DMode
6 | from .depth_box3d import DepthInstance3DBoxes
7 | from .lidar_box3d import LiDARInstance3DBoxes
8 | from .utils import (get_box_type, get_proj_mat_by_coord_type, limit_period,
9 | mono_cam_box2vis, points_cam2img, points_img2cam,
10 | rotation_3d_in_axis, xywhr2xyxyr)
11 |
12 | __all__ = [
13 | 'Box3DMode', 'BaseInstance3DBoxes', 'LiDARInstance3DBoxes',
14 | 'CameraInstance3DBoxes', 'DepthInstance3DBoxes', 'xywhr2xyxyr',
15 | 'get_box_type', 'rotation_3d_in_axis', 'limit_period', 'points_cam2img',
16 | 'points_img2cam', 'Coord3DMode', 'mono_cam_box2vis',
17 | 'get_proj_mat_by_coord_type'
18 | ]
19 |
--------------------------------------------------------------------------------
/mmdet3d/structures/points/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_points import BasePoints
3 | from .cam_points import CameraPoints
4 | from .depth_points import DepthPoints
5 | from .lidar_points import LiDARPoints
6 |
7 | __all__ = ['BasePoints', 'CameraPoints', 'DepthPoints', 'LiDARPoints']
8 |
9 |
def get_points_type(points_type: str) -> type:
    """Get the points class matching a coordinate-system name.

    Args:
        points_type (str): The type of points coordinate. The valid values
            are "CAMERA", "LIDAR" and "DEPTH" (case-insensitive).

    Returns:
        type: Points type.

    Raises:
        ValueError: If ``points_type`` is not one of the supported names.
    """
    coord_type = points_type.upper()
    if coord_type == 'CAMERA':
        return CameraPoints
    if coord_type == 'LIDAR':
        return LiDARPoints
    if coord_type == 'DEPTH':
        return DepthPoints
    raise ValueError('Only "points_type" of "CAMERA", "LIDAR" and "DEPTH" '
                     f'are supported, got {points_type}')
32 |
--------------------------------------------------------------------------------
/mmdet3d/testing/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .data_utils import (create_data_info_after_loading,
3 | create_dummy_data_info,
4 | create_mono3d_data_info_after_loading)
5 | from .model_utils import (create_detector_inputs, get_detector_cfg,
6 | get_model_cfg, setup_seed)
7 |
8 | __all__ = [
9 | 'create_dummy_data_info', 'create_data_info_after_loading',
10 | 'create_mono3d_data_info_after_loading', 'create_detector_inputs',
11 | 'get_detector_cfg', 'get_model_cfg', 'setup_seed'
12 | ]
13 |
--------------------------------------------------------------------------------
/mmdet3d/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .array_converter import ArrayConverter, array_converter
3 | from .collect_env import collect_env
4 | from .compat_cfg import compat_cfg
5 | from .misc import replace_ceph_backend
6 | from .setup_env import register_all_modules, setup_multi_processes
7 | from .typing_utils import (ConfigType, InstanceList, MultiConfig,
8 | OptConfigType, OptInstanceList, OptMultiConfig,
9 | OptSampleList, OptSamplingResultList)
10 |
11 | __all__ = [
12 | 'collect_env', 'setup_multi_processes', 'compat_cfg',
13 | 'register_all_modules', 'array_converter', 'ArrayConverter', 'ConfigType',
14 | 'OptConfigType', 'MultiConfig', 'OptMultiConfig', 'InstanceList',
15 | 'OptInstanceList', 'OptSamplingResultList', 'replace_ceph_backend',
16 | 'OptSampleList'
17 | ]
18 |
--------------------------------------------------------------------------------
/mmdet3d/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import mmdet
3 | from mmengine.utils import get_git_hash
4 | from mmengine.utils.dl_utils import collect_env as collect_base_env
5 |
6 | import mmdet3d
7 |
8 |
def collect_env():
    """Collect the information of the running environments."""
    # Start from MMEngine's base environment report and extend it.
    env_info = collect_base_env()
    env_info['MMDetection'] = mmdet.__version__
    # Append the short git hash so development installs are distinguishable.
    env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]
    # NOTE(review): imported at call time rather than module level,
    # presumably to avoid an import cycle — confirm before moving it.
    from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE
    env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE

    return env_info


if __name__ == '__main__':
    # Print one "name: value" line per collected entry.
    for name, val in collect_env().items():
        print(f'{name}: {val}')
23 |
--------------------------------------------------------------------------------
/mmdet3d/utils/typing_utils.py:
--------------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in MMDetection3D."""
from typing import List, Optional, Union

from mmdet.models.task_modules.samplers import SamplingResult
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData

from mmdet3d.structures.det3d_data_sample import Det3DDataSample

# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]

# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]

# A list of ``InstanceData`` and its optional variant.
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]

# A list of ``SamplingResult`` and its optional variant.
SamplingResultList = List[SamplingResult]

OptSamplingResultList = Optional[SamplingResultList]
# A list of ``Det3DDataSample`` and its optional variant.
SampleList = List[Det3DDataSample]
OptSampleList = Optional[SampleList]
27 |
--------------------------------------------------------------------------------
/mmdet3d/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Open-MMLab. All rights reserved.
2 |
3 | __version__ = '1.4.0'
4 | short_version = __version__
5 |
6 |
def parse_version_info(version_str: str) -> tuple:
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple: The version info, e.g., "1.3.0" is parsed into (1, 3, 0),
        and "2.0.0rc4" is parsed into (2, 0, 0, 'rc4').
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            # An 'rc' token contributes its numeric prefix and the rc tag
            # as two separate tuple entries.
            pieces = token.split('rc')
            parts.append(int(pieces[0]))
            parts.append('rc' + pieces[1])
        # Tokens that are neither digits nor rc-tagged are dropped,
        # matching the original behavior.
    return tuple(parts)
26 |
27 |
28 | version_info = parse_version_info(__version__)
29 |
--------------------------------------------------------------------------------
/mmdet3d/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .local_visualizer import Det3DLocalVisualizer
3 | from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img,
4 | proj_lidar_bbox3d_to_img, to_depth_mode, write_obj,
5 | write_oriented_bbox)
6 |
7 | __all__ = [
8 | 'Det3DLocalVisualizer', 'write_obj', 'write_oriented_bbox',
9 | 'to_depth_mode', 'proj_lidar_bbox3d_to_img', 'proj_depth_bbox3d_to_img',
10 | 'proj_camera_bbox3d_to_img'
11 | ]
12 |
--------------------------------------------------------------------------------
/model-index.yml:
--------------------------------------------------------------------------------
1 | Import:
2 | - configs/3dssd/metafile.yml
3 | - configs/centerpoint/metafile.yml
4 | - configs/dgcnn/metafile.yml
5 | - configs/dynamic_voxelization/metafile.yml
6 | - configs/fcos3d/metafile.yml
7 | - configs/free_anchor/metafile.yml
8 | - configs/groupfree3d/metafile.yml
9 | - configs/h3dnet/metafile.yml
10 | - configs/imvotenet/metafile.yml
11 | - configs/imvoxelnet/metafile.yml
12 | - configs/monoflex/metafile.yml
13 | - configs/mvxnet/metafile.yml
14 | - configs/nuimages/metafile.yml
15 | - configs/paconv/metafile.yml
16 | - configs/parta2/metafile.yml
17 | - configs/pgd/metafile.yml
18 | - configs/point_rcnn/metafile.yml
19 | - configs/pointnet2/metafile.yml
20 | - configs/pointpillars/metafile.yml
21 | - configs/regnet/metafile.yml
22 | - configs/second/metafile.yml
23 | - configs/smoke/metafile.yml
24 | - configs/ssn/metafile.yml
25 | - configs/votenet/metafile.yml
26 | - configs/minkunet/metafile.yml
27 | - configs/cylinder3d/metafile.yml
28 | - configs/pv_rcnn/metafile.yml
29 | - configs/fcaf3d/metafile.yml
30 | - configs/spvcnn/metafile.yml
31 |
--------------------------------------------------------------------------------
/projects/BEVFusion/bevfusion/__init__.py:
--------------------------------------------------------------------------------
1 | from .bevfusion import BEVFusion
2 | from .bevfusion_necks import GeneralizedLSSFPN
3 | from .depth_lss import DepthLSSTransform, LSSTransform
4 | from .loading import BEVLoadMultiViewImageFromFiles
5 | from .sparse_encoder import BEVFusionSparseEncoder
6 | from .transformer import TransformerDecoderLayer
7 | from .transforms_3d import (BEVFusionGlobalRotScaleTrans,
8 | BEVFusionRandomFlip3D, GridMask, ImageAug3D)
9 | from .transfusion_head import ConvFuser, TransFusionHead
10 | from .utils import (BBoxBEVL1Cost, HeuristicAssigner3D, HungarianAssigner3D,
11 | IoU3DCost)
12 |
13 | __all__ = [
14 | 'BEVFusion', 'TransFusionHead', 'ConvFuser', 'ImageAug3D', 'GridMask',
15 | 'GeneralizedLSSFPN', 'HungarianAssigner3D', 'BBoxBEVL1Cost', 'IoU3DCost',
16 | 'HeuristicAssigner3D', 'DepthLSSTransform', 'LSSTransform',
17 | 'BEVLoadMultiViewImageFromFiles', 'BEVFusionSparseEncoder',
18 | 'TransformerDecoderLayer', 'BEVFusionRandomFlip3D',
19 | 'BEVFusionGlobalRotScaleTrans'
20 | ]
21 |
--------------------------------------------------------------------------------
/projects/BEVFusion/bevfusion/ops/__init__.py:
--------------------------------------------------------------------------------
1 | from .bev_pool import bev_pool
2 | from .voxel import DynamicScatter, Voxelization, dynamic_scatter, voxelization
3 |
4 | __all__ = [
5 | 'bev_pool', 'Voxelization', 'voxelization', 'dynamic_scatter',
6 | 'DynamicScatter'
7 | ]
8 |
--------------------------------------------------------------------------------
/projects/BEVFusion/bevfusion/ops/bev_pool/__init__.py:
--------------------------------------------------------------------------------
1 | from .bev_pool import bev_pool
2 |
3 | __all__ = ['bev_pool']
4 |
--------------------------------------------------------------------------------
/projects/BEVFusion/bevfusion/ops/voxel/__init__.py:
--------------------------------------------------------------------------------
1 | from .scatter_points import DynamicScatter, dynamic_scatter
2 | from .voxelize import Voxelization, voxelization
3 |
4 | __all__ = ['Voxelization', 'voxelization', 'dynamic_scatter', 'DynamicScatter']
5 |
--------------------------------------------------------------------------------
/projects/BEVFusion/bevfusion/ops/voxel/src/voxelization.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include "voxelization.h"
3 |
4 | namespace voxelization {
5 |
6 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
7 | m.def("hard_voxelize", &hard_voxelize, "hard voxelize");
8 | m.def("dynamic_voxelize", &dynamic_voxelize, "dynamic voxelization");
9 | m.def("dynamic_point_to_voxel_forward", &dynamic_point_to_voxel_forward, "dynamic point to voxel forward");
10 | m.def("dynamic_point_to_voxel_backward", &dynamic_point_to_voxel_backward, "dynamic point to voxel backward");
11 | }
12 |
13 | } // namespace voxelization
14 |
--------------------------------------------------------------------------------
/projects/CENet/cenet/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .boundary_loss import BoundaryLoss
3 | from .cenet_backbone import CENet
4 | from .range_image_head import RangeImageHead
5 | from .range_image_segmentor import RangeImageSegmentor
6 | from .transforms_3d import SemkittiRangeView
7 |
8 | __all__ = [
9 | 'CENet', 'RangeImageHead', 'RangeImageSegmentor', 'SemkittiRangeView',
10 | 'BoundaryLoss'
11 | ]
12 |
--------------------------------------------------------------------------------
/projects/CenterFormer/centerformer/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox_ops import nms_iou3d
2 | from .centerformer import CenterFormer
3 | from .centerformer_backbone import (DeformableDecoderRPN,
4 | MultiFrameDeformableDecoderRPN)
5 | from .centerformer_head import CenterFormerBboxHead
6 | from .losses import FastFocalLoss
7 |
8 | __all__ = [
9 | 'CenterFormer', 'DeformableDecoderRPN', 'CenterFormerBboxHead',
10 | 'FastFocalLoss', 'nms_iou3d', 'MultiFrameDeformableDecoderRPN'
11 | ]
12 |
--------------------------------------------------------------------------------
/projects/DETR3D/detr3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .detr3d import DETR3D
2 | from .detr3d_head import DETR3DHead
3 | from .detr3d_transformer import (Detr3DCrossAtten, Detr3DTransformer,
4 | Detr3DTransformerDecoder)
5 | from .hungarian_assigner_3d import HungarianAssigner3D
6 | from .match_cost import BBox3DL1Cost
7 | from .nms_free_coder import NMSFreeCoder
8 | from .vovnet import VoVNet
9 |
10 | __all__ = [
11 | 'VoVNet', 'DETR3D', 'DETR3DHead', 'Detr3DTransformer',
12 | 'Detr3DTransformerDecoder', 'Detr3DCrossAtten', 'HungarianAssigner3D',
13 | 'BBox3DL1Cost', 'NMSFreeCoder'
14 | ]
15 |
--------------------------------------------------------------------------------
/projects/DETR3D/detr3d/match_cost.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import torch
4 | from torch import Tensor
5 |
6 | from mmdet3d.registry import TASK_UTILS
7 |
8 |
@TASK_UTILS.register_module()
class BBox3DL1Cost(object):
    """Pairwise L1 matching cost between predicted and ground-truth 3D boxes.

    Args:
        weight (Union[float, int]): Cost weight. Defaults to 1.
    """

    def __init__(self, weight: Union[float, int] = 1.):
        # Scalar multiplier applied to the raw L1 distance matrix.
        self.weight = weight

    def __call__(self, bbox_pred: Tensor, gt_bboxes: Tensor) -> Tensor:
        """Compute the weighted L1 match cost.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y), all in range [0, 1],
                with shape [num_query, 10].
            gt_bboxes (Tensor): Ground truth boxes with `normalized`
                coordinates (cx,cy,l,w,cz,h,sin(φ),cos(φ),v_x,v_y) and
                shape [num_gt, 10].

        Returns:
            Tensor: Match Cost matrix of shape (num_preds, num_gts).
        """
        # Manhattan (p=1) pairwise distance, scaled by the configured weight.
        return torch.cdist(bbox_pred, gt_bboxes, p=1) * self.weight
35 |
--------------------------------------------------------------------------------
/projects/DETR3D/old_detr3d_converter.py:
--------------------------------------------------------------------------------
from argparse import ArgumentParser

import torch

# Convert an old-style DETR3D checkpoint in place: for every regression
# branch whose leading dimension equals the box code size, swap the (w, l)
# rows and negate-swap the (sin(φ), cos(φ)) rows, then tag the checkpoint
# so a second run is a no-op.
parser = ArgumentParser()
parser.add_argument('src', default='old.pth')
parser.add_argument('dst', default='new.pth')  # ('training','validation')
# Use an int default directly instead of the string '10': relying on
# argparse's implicit type-conversion of string defaults is fragile.
parser.add_argument('--code_size', type=int, default=10)
args = parser.parse_args()
model = torch.load(args.src)
code_size = args.code_size
if model['meta'].get('detr3d_convert_tag') is not None:
    print('this model has already converted!')
else:
    print('converting...')
    # (cx, cy, w, l, cz, h, sin(φ), cos(φ), vx, vy)
    for key in model['state_dict']:
        tsr = model['state_dict'][key]
        if 'reg_branches' in key and tsr.shape[0] == code_size:
            print(key, ' with ', tsr.shape, 'has changed')
            # Swap w <-> l rows; map (sin, cos) -> (-cos, -sin).
            tsr[[2, 3], ...] = tsr[[3, 2], ...]
            tsr[[6, 7], ...] = -tsr[[7, 6], ...]
    # Mark the checkpoint as converted so the guard above catches reruns.
    model['meta']['detr3d_convert_tag'] = True
    torch.save(model, args.dst)
print('done...')
26 |
--------------------------------------------------------------------------------
/projects/DSVT/dsvt/__init__.py:
--------------------------------------------------------------------------------
1 | from .disable_aug_hook import DisableAugHook
2 | from .dsvt import DSVT
3 | from .dsvt_head import DSVTCenterHead
4 | from .dsvt_transformer import DSVTMiddleEncoder
5 | from .dynamic_pillar_vfe import DynamicPillarVFE3D
6 | from .map2bev import PointPillarsScatter3D
7 | from .res_second import ResSECOND
8 | from .transforms_3d import ObjectRangeFilter3D, PointsRangeFilter3D
9 | from .utils import DSVTBBoxCoder
10 |
11 | __all__ = [
12 | 'DSVTCenterHead', 'DSVT', 'DSVTMiddleEncoder', 'DynamicPillarVFE3D',
13 | 'PointPillarsScatter3D', 'ResSECOND', 'DSVTBBoxCoder',
14 | 'ObjectRangeFilter3D', 'PointsRangeFilter3D', 'DisableAugHook'
15 | ]
16 |
--------------------------------------------------------------------------------
/projects/DSVT/dsvt/ops/ingroup_inds/ingroup_inds_op.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
try:
    from . import ingroup_inds_cuda

    # Alias the compiled CUDA extension; this must live inside the try so a
    # failed import leaves the None fallback intact instead of raising a
    # NameError on the undefined module name.
    ingroup_indices = ingroup_inds_cuda
except ImportError:
    ingroup_indices = None
    print('Can not import ingroup indices')
13 |
14 |
class IngroupIndicesFunction(Function):
    """Autograd wrapper around the in-group index CUDA op."""

    @staticmethod
    def forward(ctx, group_inds):
        # Start from -1 everywhere; the extension op fills in the results.
        out_inds = torch.full_like(group_inds, -1)
        ingroup_indices.forward(group_inds, out_inds)
        # Indices are discrete, so mark the output as non-differentiable.
        ctx.mark_non_differentiable(out_inds)
        return out_inds

    @staticmethod
    def backward(ctx, g):
        # No gradient flows back through an index computation.
        return None


ingroup_inds = IngroupIndicesFunction.apply
35 |
--------------------------------------------------------------------------------
/projects/DSVT/dsvt/ops/ingroup_inds/src/error.cuh:
--------------------------------------------------------------------------------
#pragma once
// NOTE(review): the bare `#include` had lost its argument; printf/exit used
// by the macro below need the C stdio/stdlib declarations.
#include <stdio.h>
#include <stdlib.h>

// Wrap a CUDA runtime call: on any error, print file/line/code/text
// diagnostics and abort the process.
#define CHECK_CALL(call)                                      \
    do                                                        \
    {                                                         \
        const cudaError_t error_code = call;                  \
        if (error_code != cudaSuccess)                        \
        {                                                     \
            printf("CUDA Error:\n");                          \
            printf(" File: %s\n", __FILE__);                  \
            printf(" Line: %d\n", __LINE__);                  \
            printf(" Error code: %d\n", error_code);          \
            printf(" Error text: %s\n",                       \
                cudaGetErrorString(error_code));              \
            exit(1);                                          \
        }                                                     \
    } while (0)
19 |
--------------------------------------------------------------------------------
/projects/NeRF-Det/nerfdet/__init__.py:
--------------------------------------------------------------------------------
1 | from .data_preprocessor import NeRFDetDataPreprocessor
2 | from .formating import PackNeRFDetInputs
3 | from .multiview_pipeline import MultiViewPipeline, RandomShiftOrigin
4 | from .nerfdet import NerfDet
5 | from .nerfdet_head import NerfDetHead
6 | from .scannet_multiview_dataset import MultiViewScanNetDataset
7 |
8 | __all__ = [
9 | 'MultiViewScanNetDataset', 'MultiViewPipeline', 'RandomShiftOrigin',
10 | 'PackNeRFDetInputs', 'NeRFDetDataPreprocessor', 'NerfDetHead', 'NerfDet'
11 | ]
12 |
--------------------------------------------------------------------------------
/projects/PETR/petr/__init__.py:
--------------------------------------------------------------------------------
1 | from .cp_fpn import CPFPN
2 | from .hungarian_assigner_3d import HungarianAssigner3D
3 | from .match_cost import BBox3DL1Cost
4 | from .nms_free_coder import NMSFreeCoder
5 | from .petr import PETR
6 | from .petr_head import PETRHead
7 | from .petr_transformer import (PETRDNTransformer, PETRMultiheadAttention,
8 | PETRTransformer, PETRTransformerDecoder,
9 | PETRTransformerDecoderLayer,
10 | PETRTransformerEncoder)
11 | from .positional_encoding import (LearnedPositionalEncoding3D,
12 | SinePositionalEncoding3D)
13 | from .transforms_3d import GlobalRotScaleTransImage, ResizeCropFlipImage
14 | from .utils import denormalize_bbox, normalize_bbox
15 | from .vovnetcp import VoVNetCP
16 |
17 | __all__ = [
18 | 'GlobalRotScaleTransImage', 'ResizeCropFlipImage', 'VoVNetCP', 'PETRHead',
19 | 'CPFPN', 'HungarianAssigner3D', 'NMSFreeCoder', 'BBox3DL1Cost',
20 | 'LearnedPositionalEncoding3D', 'PETRDNTransformer',
21 | 'PETRMultiheadAttention', 'PETRTransformer', 'PETRTransformerDecoder',
22 | 'PETRTransformerDecoderLayer', 'PETRTransformerEncoder', 'PETR',
23 | 'SinePositionalEncoding3D', 'denormalize_bbox', 'normalize_bbox'
24 | ]
25 |
--------------------------------------------------------------------------------
/projects/TPVFormer/tpvformer/__init__.py:
--------------------------------------------------------------------------------
1 | from .cross_view_hybrid_attention import TPVCrossViewHybridAttention
2 | from .data_preprocessor import TPVFormerDataPreprocessor
3 | from .image_cross_attention import TPVImageCrossAttention
4 | from .loading import BEVLoadMultiViewImageFromFiles, SegLabelMapping
5 | from .nuscenes_dataset import NuScenesSegDataset
6 | from .positional_encoding import TPVFormerPositionalEncoding
7 | from .tpvformer import TPVFormer
8 | from .tpvformer_encoder import TPVFormerEncoder
9 | from .tpvformer_head import TPVFormerDecoder
10 | from .tpvformer_layer import TPVFormerLayer
11 |
12 | __all__ = [
13 | 'TPVCrossViewHybridAttention', 'TPVImageCrossAttention',
14 | 'TPVFormerPositionalEncoding', 'TPVFormer', 'TPVFormerEncoder',
15 | 'TPVFormerLayer', 'NuScenesSegDataset', 'BEVLoadMultiViewImageFromFiles',
16 | 'SegLabelMapping', 'TPVFormerDecoder', 'TPVFormerDataPreprocessor'
17 | ]
18 |
--------------------------------------------------------------------------------
/projects/TR3D/tr3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .axis_aligned_iou_loss import TR3DAxisAlignedIoULoss
2 | from .mink_resnet import TR3DMinkResNet
3 | from .rotated_iou_loss import TR3DRotatedIoU3DLoss
4 | from .tr3d_head import TR3DHead
5 | from .tr3d_neck import TR3DNeck
6 | from .transforms_3d import TR3DPointSample
7 |
8 | __all__ = [
9 | 'TR3DAxisAlignedIoULoss', 'TR3DMinkResNet', 'TR3DRotatedIoU3DLoss',
10 | 'TR3DHead', 'TR3DNeck', 'TR3DPointSample'
11 | ]
12 |
--------------------------------------------------------------------------------
/projects/example_project/configs/fcos3d_dummy-resnet-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py:
--------------------------------------------------------------------------------
# Inherit the full FCOS3D NuScenes mono-3D config and only override the
# backbone type below.
_base_ = [
    '../../../configs/fcos3d/fcos3d_r101-caffe-dcn_fpn_head-gn_8xb2-1x_nus-mono3d.py'  # noqa
]

# Register the project's custom modules so 'DummyResNet' resolves at
# config-build time.
custom_imports = dict(imports=['projects.example_project.dummy'])

# Swap the inherited backbone for the project's DummyResNet wrapper.
_base_.model.backbone.type = 'DummyResNet'
8 |
--------------------------------------------------------------------------------
/projects/example_project/dummy/__init__.py:
--------------------------------------------------------------------------------
1 | from .dummy_resnet import DummyResNet
2 |
3 | __all__ = ['DummyResNet']
4 |
--------------------------------------------------------------------------------
/projects/example_project/dummy/dummy_resnet.py:
--------------------------------------------------------------------------------
1 | from mmdet.models.backbones import ResNet
2 |
3 | from mmdet3d.registry import MODELS
4 |
5 |
@MODELS.register_module()
class DummyResNet(ResNet):
    """Minimal ResNet subclass demonstrating custom-module registration.

    Args:
        **kwargs: All the arguments are passed to the parent class.
    """

    def __init__(self, **kwargs) -> None:
        # Demonstration side effect: announce construction, then delegate
        # everything to the stock ResNet initializer.
        print('Hello world!')
        super().__init__(**kwargs)
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/build.txt
2 | -r requirements/optional.txt
3 | -r requirements/runtime.txt
4 | -r requirements/tests.txt
5 |
--------------------------------------------------------------------------------
/requirements/build.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/requirements/build.txt
--------------------------------------------------------------------------------
/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | markdown>=3.4.0
3 | myst-parser
4 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
5 | sphinx==4.0.2
6 | sphinx-tabs
7 | sphinx_copybutton
8 | sphinx_markdown_tables>=0.0.16
9 | tabulate
10 | urllib3<2.0.0
11 |
--------------------------------------------------------------------------------
/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0rc4,<2.2.0
2 | mmdet>=3.0.0,<3.3.0
3 | mmengine>=0.7.1,<1.0.0
4 |
--------------------------------------------------------------------------------
/requirements/optional.txt:
--------------------------------------------------------------------------------
1 | black==20.8b1 # be compatible with typing-extensions 3.7.4
2 | typing-extensions # required by tensorflow<=2.6
3 | waymo-open-dataset-tf-2-6-0 # requires python>=3.7
4 |
--------------------------------------------------------------------------------
/requirements/readthedocs.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0rc4
2 | mmdet>=3.0.0
3 | mmengine>=0.7.1
4 | torch
5 | torchvision
6 |
--------------------------------------------------------------------------------
/requirements/runtime.txt:
--------------------------------------------------------------------------------
1 | lyft_dataset_sdk
2 | networkx>=2.5
3 | numba # you should install numba==0.53.0 if your environment is cuda-9.0
4 | numpy
5 | nuscenes-devkit
6 | open3d
7 | plyfile
8 | scikit-image
9 | # by default we also use tensorboard to log results
10 | tensorboard
11 | trimesh
12 |
--------------------------------------------------------------------------------
/requirements/tests.txt:
--------------------------------------------------------------------------------
1 | codecov
2 | flake8
3 | interrogate
4 | isort
5 | # Note: used for kwarray.group_items, this may be ported to mmcv in the future.
6 | kwarray
7 | parameterized
8 | pytest
9 | pytest-cov
10 | pytest-runner
11 | ubelt
12 | xdoctest >= 0.10.0
13 | yapf
14 |
--------------------------------------------------------------------------------
/resources/browse_dataset_mono.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/browse_dataset_mono.png
--------------------------------------------------------------------------------
/resources/browse_dataset_multi_modality.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/browse_dataset_multi_modality.png
--------------------------------------------------------------------------------
/resources/browse_dataset_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/browse_dataset_seg.png
--------------------------------------------------------------------------------
/resources/coord_sys_all.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/coord_sys_all.png
--------------------------------------------------------------------------------
/resources/data_pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/data_pipeline.png
--------------------------------------------------------------------------------
/resources/loss_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/loss_curve.png
--------------------------------------------------------------------------------
/resources/mmdet3d-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/mmdet3d-logo.png
--------------------------------------------------------------------------------
/resources/mmdet3d_outdoor_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/mmdet3d_outdoor_demo.gif
--------------------------------------------------------------------------------
/resources/nuimages_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/nuimages_demo.gif
--------------------------------------------------------------------------------
/resources/open3d_visual.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/open3d_visual.gif
--------------------------------------------------------------------------------
/resources/open3d_visual.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/resources/open3d_visual.png
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [yapf]
2 | BASED_ON_STYLE = pep8
3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
5 |
6 | [isort]
7 | line_length = 79
8 | multi_line_output = 0
9 | extra_standard_library = setuptools
10 | known_first_party = mmdet3d
11 | known_third_party = cv2,imageio,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,mmdet,mmengine,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,pytorch_sphinx_theme,recommonmark,requests,scannet_utils,scipy,seaborn,shapely,skimage,sphinx,tensorflow,terminaltables,torch,trimesh,ts,waymo_open_dataset
12 | no_lines_before = STDLIB,LOCALFOLDER
13 | default_section = THIRDPARTY
14 |
15 | [codespell]
16 | ignore-words-list = ans,refridgerator,crate,hist,formating,dout,wan,nd,fo,avod,AVOD,warmup
17 |
18 | [flake8]
19 | per-file-ignores = mmdet3d/configs/*:F401,F403,F405
20 |
--------------------------------------------------------------------------------
/tests/data/kitti/a.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/a.bin
--------------------------------------------------------------------------------
/tests/data/kitti/kitti_dbinfos_train.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/kitti_dbinfos_train.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/kitti_gt_database/0_Pedestrian_0.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/kitti_gt_database/0_Pedestrian_0.bin
--------------------------------------------------------------------------------
/tests/data/kitti/kitti_infos_mono3d.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/kitti_infos_mono3d.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/kitti_infos_train.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/kitti_infos_train.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/mono3d_sample_results.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/mono3d_sample_results.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/mono3d_sample_results2d.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/mono3d_sample_results2d.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/training/calib/000000.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/training/calib/000000.pkl
--------------------------------------------------------------------------------
/tests/data/kitti/training/image_2/000000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/training/image_2/000000.png
--------------------------------------------------------------------------------
/tests/data/kitti/training/image_2/000007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/training/image_2/000007.png
--------------------------------------------------------------------------------
/tests/data/kitti/training/velodyne/000000.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/training/velodyne/000000.bin
--------------------------------------------------------------------------------
/tests/data/kitti/training/velodyne_reduced/000000.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/kitti/training/velodyne_reduced/000000.bin
--------------------------------------------------------------------------------
/tests/data/lyft/lidar/host-a017_lidar1_1236118886501000046.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/lidar/host-a017_lidar1_1236118886501000046.bin
--------------------------------------------------------------------------------
/tests/data/lyft/lidar/host-a017_lidar1_1236118886701083686.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/lidar/host-a017_lidar1_1236118886701083686.bin
--------------------------------------------------------------------------------
/tests/data/lyft/lidar/host-a017_lidar1_1236118886901125926.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/lidar/host-a017_lidar1_1236118886901125926.bin
--------------------------------------------------------------------------------
/tests/data/lyft/lyft_infos.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/lyft_infos.pkl
--------------------------------------------------------------------------------
/tests/data/lyft/lyft_infos_val.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/lyft_infos_val.pkl
--------------------------------------------------------------------------------
/tests/data/lyft/sample_results.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/sample_results.pkl
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/maps/map_raster_palo_alto.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/lyft/v1.01-train/maps/map_raster_palo_alto.png
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/category.json:
--------------------------------------------------------------------------------
1 | [{"description": "", "token": "8eccddb83fa7f8f992b2500f2ad658f65c9095588f3bc0ae338d97aff2dbcb9c", "name": "car"}, {"description": "", "token": "73e8de69959eb9f5b4cd2859e74bec4b5491417336cad63f27e8edb8530ffbf8", "name": "pedestrian"}, {"description": "", "token": "f81f51e1897311b55c0c6247c3db825466733e08df687c0ea830b026316a1c12", "name": "animal"}, {"description": "", "token": "a8619a81f5acc7da2a2f034420db2b2f43a015c8c689822e5295b5d6f7a5d8d6", "name": "other_vehicle"}, {"description": "", "token": "7ea5868735dc408109bfa74fd139bd8c5aa3767b52e054b8e792cabcf309c6f4", "name": "bus"}, {"description": "", "token": "1248d937c7a1a2eb1022ba9e806ebd5183b6c20c7f5703d8a7320600f1caad27", "name": "motorcycle"}, {"description": "", "token": "8af78d27e148a8f544f4c86b2a3f4bd6192b975d1065dd932ebdb8879778e275", "name": "truck"}, {"token": "c4e92c2b854fb03382838643c36d01a349f5624a11f184042d91d916d19acdbd", "description": "", "name": "emergency_vehicle"}, {"description": "", "token": "8c07dfa7af0da0191d59a2db50dc26ad1528be1fad483f17e2586309482d81bd", "name": "bicycle"}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/log.json:
--------------------------------------------------------------------------------
1 | [{"date_captured": "2019-05-01", "location": "Palo Alto", "token": "9d0166ccd4af9c089738587f6e3d21cd9c8b6102787427da8c3b4f64161160c5", "vehicle": "a101", "logfile": ""}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/map.json:
--------------------------------------------------------------------------------
1 | [{"log_tokens": ["9d0166ccd4af9c089738587f6e3d21cd9c8b6102787427da8c3b4f64161160c5"], "token": "53992ee3023e5494b90c316c183be829", "filename": "maps/map_raster_palo_alto.png", "category": "semantic_prior"}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/sample.json:
--------------------------------------------------------------------------------
1 | [{"next": "b8625b49ee4b7679cb81c50895bb918c98800c274e1bca22cd3208770bd3aaa1", "prev": "da683bff4f51b8073ef139476f5ad745711527a7bc7d83b20fcb871f32f9eda6", "token": "199e3146d98e6a2047bafbc222b92f5b67c4640a69b0d1d35b710242de816679", "scene_token": "9d0166ccd4af9c089738587f6e3d21cd9c8b6102787427da8c3b4f64161160c5", "timestamp": 1556675185903083.2}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/scene.json:
--------------------------------------------------------------------------------
1 | [{"description": "", "log_token": "9d0166ccd4af9c089738587f6e3d21cd9c8b6102787427da8c3b4f64161160c5", "name": "host-a101-lidar0-1240710366399037786-1240710391298976894", "first_sample_token": "0ebe3320a4049a1efe2af53c2094d102971a6269b70a72e127eaeabcbff9445d", "last_sample_token": "154956ad6f00a7c5e7058da67cca30b130ba0f3ea27dab72cd2ee802fa1f58cb", "nbr_samples": 126, "token": "9d0166ccd4af9c089738587f6e3d21cd9c8b6102787427da8c3b4f64161160c5"}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/sensor.json:
--------------------------------------------------------------------------------
1 | [{"modality": "camera", "channel": "CAM_FRONT_LEFT", "token": "f7dad6bb70cb8e6245f96e5537e382848335872e6e259218b0a80cc071d162c4"}, {"modality": "lidar", "channel": "LIDAR_FRONT_LEFT", "token": "629da739ea3a0814b9d79ffc3b99cdbc8bdbb6f97abf139f1ad33bd12f8fa94d"}, {"modality": "camera", "channel": "CAM_FRONT", "token": "eb9e8f60a3d6e3328d7512b9f8e6800127fe91f4d62bc8e48a0e6a7cb116cc60"}, {"modality": "lidar", "channel": "LIDAR_TOP", "token": "25bf751d7e35f295393d8a418731474b21c1f702e878c4553f112397caa48c08"}, {"modality": "camera", "channel": "CAM_BACK_LEFT", "token": "c84592e22beb2c0f14d5159245ce8d6678431b879e940eed580651c09cc7d2f1"}, {"modality": "lidar", "channel": "LIDAR_FRONT_RIGHT", "token": "953faed96fd3d2fae3ec03cd2838b312b8c1a9bb7a0629481982870cb28acb67"}, {"modality": "camera", "channel": "CAM_BACK", "token": "172a55e2b50f18a6b6d545369a457003c2f3b438d0180b2b4c7819ca29b3f6ab"}, {"modality": "camera", "channel": "CAM_BACK_RIGHT", "token": "8e0259a868ddbfe589c2ce9c8bfb34a9e6c19c108b882c45433c76b70a2491a2"}, {"modality": "camera", "channel": "CAM_FRONT_ZOOMED", "token": "286718e1fbc8c8f0ca441969d91c36a8a809666e049e54ce121636100b520946"}, {"modality": "camera", "channel": "CAM_FRONT_RIGHT", "token": "de29f3ebb9d96988ac2c5913022c3102a51134e950284aac2e8511f83aff9a81"}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/v1.01-train/v1.01-train/visibility.json:
--------------------------------------------------------------------------------
1 | [{"level": "v60-80", "description": "visibility of whole object is between 60 and 80%", "token": "3"}, {"level": "v0-40", "description": "visibility of whole object is between 0 and 40%", "token": "1"}, {"level": "v40-60", "description": "visibility of whole object is between 40 and 60%", "token": "2"}, {"level": "v80-100", "description": "visibility of whole object is between 80 and 100%", "token": "4"}]
2 |
--------------------------------------------------------------------------------
/tests/data/lyft/val.txt:
--------------------------------------------------------------------------------
1 | host-a101-lidar0-1240710366399037786-1240710391298976894
2 |
--------------------------------------------------------------------------------
/tests/data/nuscenes/mono3d_sample_results.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/mono3d_sample_results.pkl
--------------------------------------------------------------------------------
/tests/data/nuscenes/nus_info.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/nus_info.pkl
--------------------------------------------------------------------------------
/tests/data/nuscenes/samples/CAM_BACK_LEFT/n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/samples/CAM_BACK_LEFT/n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg
--------------------------------------------------------------------------------
/tests/data/nuscenes/samples/LIDAR_TOP/n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470948018.pcd.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/samples/LIDAR_TOP/n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470948018.pcd.bin
--------------------------------------------------------------------------------
/tests/data/nuscenes/sweeps/LIDAR_TOP/n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/sweeps/LIDAR_TOP/n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin
--------------------------------------------------------------------------------
/tests/data/nuscenes/sweeps/LIDAR_TOP/n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470898274.pcd.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/nuscenes/sweeps/LIDAR_TOP/n015-2018-08-02-17-16-37+0800__LIDAR_TOP__1533201470898274.pcd.bin
--------------------------------------------------------------------------------
/tests/data/ops/features_for_fps_distance.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/ops/features_for_fps_distance.npy
--------------------------------------------------------------------------------
/tests/data/ops/fps_idx.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/ops/fps_idx.npy
--------------------------------------------------------------------------------
/tests/data/s3dis/instance_mask/Area_1_office_2.bin:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/tests/data/s3dis/points/Area_1_office_2.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/s3dis/points/Area_1_office_2.bin
--------------------------------------------------------------------------------
/tests/data/s3dis/s3dis_infos.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-mmlab/mmdetection3d/fe25f7a51d36e3702f961e198894580d83c4387b/tests/data/s3dis/s3dis_infos.pkl
--------------------------------------------------------------------------------
/tests/data/s3dis/semantic_mask/Area_1_office_2.bin:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/tests/data/scannet/instance_mask/scene0000_00.bin:
--------------------------------------------------------------------------------
1 | 8
2 | # +
3 | ( ! 8
4 | , 5 % ! !
5 | @ 9 .
6 | %
7 |
8 |
9 | 8 &