├── data ├── utils │ └── __init__.py ├── data_qualificator │ ├── internal │ │ └── __init__.py │ ├── benchmarking │ │ └── __init__.py │ └── data_qualificator.py ├── .DS_Store ├── data_loader │ ├── .DS_Store │ └── data_loader.py ├── data_denoisor │ ├── denoisor_pcp_utils.py │ └── data_denoisor.py ├── data_slimmor │ ├── __init__.py │ └── data_slim.py ├── data_visualizor │ ├── .DS_Store │ ├── __init__.py │ ├── visualization_utils.py │ └── data_visualizor.py └── data_simulator │ ├── integral_lookup_tables │ ├── shifted │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle │ │ └── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle │ └── original │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle │ │ ├── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle │ │ └── integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle │ └── data_simulator.py ├── deephub ├── __init__.py ├── detection_model │ ├── backbones │ │ └── __init__.py │ ├── necks │ │ └── __init__.py │ ├── middle_encoders │ │ ├── __init__.py │ │ └── pillar_scatter.py │ ├── voxel_encoders │ │ └── __init__.py │ ├── __init__.py │ ├── heads │ │ ├── __init__.py │ │ └── anchor3d_head.py │ └── pointpillars.py └── denoisy_model │ ├── pcp │ ├── images │ │ └── teaser.png │ ├── .gitignore │ ├── noise_removal │ │ ├── run.sh │ │ └── utils.py │ ├── outliers_removal │ │ └── utils.py │ └── models │ │ └── download_models.py │ └── dmr │ ├── pretrained │ ├── supervised │ │ ├── epoch=153.ckpt │ │ └── events.out.tfevents.1585322227.ubuntu.40310.0 │ ├── unsupervised │ │ ├── epoch=141.ckpt │ │ └── events.out.tfevents.1585483449.ubuntu.20813.0 │ └── README.md │ ├── data │ └── README.md │ ├── ops │ └── emd │ │ ├── setup.py │ │ ├── emd.cpp │ │ └── README.md │ ├── train.py │ ├── environment.yml │ ├── models │ ├── net.py │ ├── pool.py │ └── conv.py │ └── utils │ └── dataset.py ├── engine ├── __init__.py ├── .DS_Store └── Tiny3D_engine.png ├── test ├── test_model_ops │ ├── test_ensemblor.py │ └── .DS_Store ├── .DS_Store ├── test_data_ops │ ├── .DS_Store │ ├── test_filter.py │ ├── test_loader.py │ ├── test_visualizor.py │ ├── test_denoisor.py │ └── test_simulator.py ├── data_tobe_tested │ ├── kitti │ │ └── kitti_000008.bin │ └── nuscenes │ │ └── n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151603547590.pcd.bin └── test_engine │ └── test_engine.py ├── .DS_Store ├── mmdet3d ├── ops │ ├── paconv │ │ └── __init__.py │ ├── spconv │ │ ├── overwrite_spconv │ │ │ └── __init__.py │ │ └── __init__.py │ ├── dgcnn_modules │ │ ├── __init__.py │ │ ├── dgcnn_fp_module.py │ │ └── dgcnn_fa_module.py │ ├── 
pointnet_modules │ │ ├── __init__.py │ │ ├── builder.py │ │ └── point_fp_module.py │ └── __init__.py ├── core │ ├── evaluation │ │ ├── waymo_utils │ │ │ └── __init__.py │ │ ├── kitti_utils │ │ │ └── __init__.py │ │ ├── scannet_utils │ │ │ ├── __init__.py │ │ │ └── util_3d.py │ │ └── __init__.py │ ├── bbox │ │ ├── assigners │ │ │ └── __init__.py │ │ ├── iou_calculators │ │ │ └── __init__.py │ │ ├── samplers │ │ │ └── __init__.py │ │ ├── structures │ │ │ └── __init__.py │ │ ├── coders │ │ │ ├── __init__.py │ │ │ └── delta_xyzwhlr_bbox_coder.py │ │ ├── __init__.py │ │ └── transforms.py │ ├── voxel │ │ ├── __init__.py │ │ └── builder.py │ ├── visualizer │ │ └── __init__.py │ ├── __init__.py │ ├── utils │ │ └── __init__.py │ ├── anchor │ │ └── __init__.py │ ├── post_processing │ │ └── __init__.py │ └── points │ │ ├── __init__.py │ │ ├── depth_points.py │ │ ├── lidar_points.py │ │ └── cam_points.py ├── models │ ├── segmentors │ │ └── __init__.py │ ├── roi_heads │ │ ├── mask_heads │ │ │ └── __init__.py │ │ ├── roi_extractors │ │ │ ├── __init__.py │ │ │ ├── single_roiaware_extractor.py │ │ │ └── single_roipoint_extractor.py │ │ ├── __init__.py │ │ ├── bbox_heads │ │ │ └── __init__.py │ │ └── base_3droi_head.py │ ├── decode_heads │ │ ├── __init__.py │ │ ├── dgcnn_head.py │ │ ├── paconv_head.py │ │ └── pointnet2_head.py │ ├── model_utils │ │ ├── __init__.py │ │ └── edge_fusion_module.py │ ├── middle_encoders │ │ └── __init__.py │ ├── voxel_encoders │ │ └── __init__.py │ ├── necks │ │ ├── __init__.py │ │ └── pointnet2_fp_neck.py │ ├── fusion_layers │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── clip_sigmoid.py │ │ ├── mlp.py │ │ ├── edge_indices.py │ │ └── gen_keypoints.py │ ├── backbones │ │ ├── __init__.py │ │ └── base_pointnet.py │ ├── detectors │ │ ├── smoke_mono3d.py │ │ ├── ssd3dnet.py │ │ ├── fcos_mono3d.py │ │ ├── __init__.py │ │ ├── two_stage.py │ │ ├── mvx_faster_rcnn.py │ │ ├── dynamic_voxelnet.py │ │ └── single_stage.py │ ├── losses │ │ ├── __init__.py │ │ └── axis_aligned_iou_loss.py │ ├── dense_heads │ │ ├── __init__.py │ │ └── base_mono3d_dense_head.py │ └── __init__.py ├── utils │ ├── __init__.py │ ├── collect_env.py │ ├── logger.py │ ├── misc.py │ └── setup_env.py ├── version.py ├── apis │ └── __init__.py ├── __init__.py └── datasets │ ├── pipelines │ ├── __init__.py │ ├── .ipynb_checkpoints │ │ └── __init__-checkpoint.py │ └── compose.py │ ├── builder.py │ ├── __init__.py │ └── dataset_wrappers.py ├── .gitignore └── model ├── model_compressor ├── compressor.py ├── demo_dynamic_quant.py ├── demo_torch_prune.py └── demo_static_quant.py ├── model_ensemblor └── model_ensemble.py └── model_deployor ├── deployor.py └── README.md /data/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deephub/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data/data_qualificator/internal/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /engine/__init__.py: -------------------------------------------------------------------------------- 1 | from .engineor import * -------------------------------------------------------------------------------- /test/test_model_ops/test_ensemblor.py: 
-------------------------------------------------------------------------------- 1 | import unittest -------------------------------------------------------------------------------- /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/.DS_Store -------------------------------------------------------------------------------- /deephub/detection_model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .second import SECOND -------------------------------------------------------------------------------- /deephub/detection_model/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .second_fpn import SECONDFPN -------------------------------------------------------------------------------- /data/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/.DS_Store -------------------------------------------------------------------------------- /data/data_qualificator/benchmarking/__init__.py: -------------------------------------------------------------------------------- 1 | from . import noise_generation 2 | -------------------------------------------------------------------------------- /test/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/test/.DS_Store -------------------------------------------------------------------------------- /engine/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/engine/.DS_Store -------------------------------------------------------------------------------- /deephub/detection_model/middle_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | from .pillar_scatter import PointPillarsScatter -------------------------------------------------------------------------------- /deephub/detection_model/voxel_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | from .pillar_encoder import PillarFeatureNet -------------------------------------------------------------------------------- /engine/Tiny3D_engine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/engine/Tiny3D_engine.png -------------------------------------------------------------------------------- /data/data_loader/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_loader/.DS_Store -------------------------------------------------------------------------------- /data/data_denoisor/denoisor_pcp_utils.py: -------------------------------------------------------------------------------- 1 | from deephub.denoisy_model.pcp.noise_removal.pcpnet import ResPCPNet 2 | -------------------------------------------------------------------------------- /data/data_slimmor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_slimmor/__init__.py
-------------------------------------------------------------------------------- /data/data_slimmor/data_slim.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_slimmor/data_slim.py -------------------------------------------------------------------------------- /data/data_visualizor/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_visualizor/.DS_Store -------------------------------------------------------------------------------- /test/test_data_ops/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/test/test_data_ops/.DS_Store -------------------------------------------------------------------------------- /test/test_model_ops/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/test/test_model_ops/.DS_Store -------------------------------------------------------------------------------- /data/data_visualizor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_visualizor/__init__.py -------------------------------------------------------------------------------- /deephub/detection_model/__init__.py: -------------------------------------------------------------------------------- 1 | from .pointpillars import Pointpillars 2 | from .centerpoint import Centerpoint -------------------------------------------------------------------------------- /deephub/detection_model/heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .anchor3d_head import Anchor3DHead 2 | from .centerpoint_head import CenterHead -------------------------------------------------------------------------------- /data/data_visualizor/visualization_utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_visualizor/visualization_utils.py -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/images/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/deephub/denoisy_model/pcp/images/teaser.png -------------------------------------------------------------------------------- /test/data_tobe_tested/kitti/kitti_000008.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/test/data_tobe_tested/kitti/kitti_000008.bin -------------------------------------------------------------------------------- /mmdet3d/ops/paconv/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .paconv import PAConv, PAConvCUDA 3 | 4 | __all__ = ['PAConv', 'PAConvCUDA'] 5 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/pretrained/supervised/epoch=153.ckpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/deephub/denoisy_model/dmr/pretrained/supervised/epoch=153.ckpt -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/pretrained/unsupervised/epoch=141.ckpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/deephub/denoisy_model/dmr/pretrained/unsupervised/epoch=141.ckpt -------------------------------------------------------------------------------- /mmdet3d/ops/spconv/overwrite_spconv/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .write_spconv2 import register_spconv2 3 | 4 | __all__ = ['register_spconv2'] 5 | -------------------------------------------------------------------------------- /mmdet3d/core/evaluation/waymo_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .prediction_kitti_to_waymo import KITTI2Waymo 3 | 4 | __all__ = ['KITTI2Waymo'] 5 | -------------------------------------------------------------------------------- /mmdet3d/core/evaluation/kitti_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .eval import kitti_eval, kitti_eval_coco_style 3 | 4 | __all__ = ['kitti_eval', 'kitti_eval_coco_style'] 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.npy 2 | *.onnx 3 | *.trt 4 | *.pyc 5 | *.zip 6 | *.tar 7 | *.exe 8 | *.log 9 | *.pth 10 | **/__pycache__/** 11 | .vscode/ 12 | mmdetection3d/ 13 | lightning_logs/ 14 | checkpoints/ 15 | -------------------------------------------------------------------------------- /mmdet3d/core/evaluation/scannet_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .evaluate_semantic_instance import evaluate_matches, scannet_eval 3 | 4 | __all__ = ['scannet_eval', 'evaluate_matches'] 5 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.core.bbox import AssignResult, BaseAssigner, MaxIoUAssigner 3 | 4 | __all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] 5 | -------------------------------------------------------------------------------- /mmdet3d/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base import Base3DSegmentor 3 | from .encoder_decoder import EncoderDecoder3D 4 | 5 | __all__ = ['Base3DSegmentor', 'EncoderDecoder3D'] 6 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/pretrained/supervised/events.out.tfevents.1585322227.ubuntu.40310.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/deephub/denoisy_model/dmr/pretrained/supervised/events.out.tfevents.1585322227.ubuntu.40310.0 -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/pretrained/unsupervised/events.out.tfevents.1585483449.ubuntu.20813.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/deephub/denoisy_model/dmr/pretrained/unsupervised/events.out.tfevents.1585483449.ubuntu.20813.0 -------------------------------------------------------------------------------- /mmdet3d/core/voxel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .builder import build_voxel_generator 3 | from .voxel_generator import VoxelGenerator 4 | 5 | __all__ = ['build_voxel_generator', 'VoxelGenerator'] 6 | -------------------------------------------------------------------------------- /test/data_tobe_tested/nuscenes/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151603547590.pcd.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/test/data_tobe_tested/nuscenes/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151603547590.pcd.bin -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/mask_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .pointwise_semantic_head import PointwiseSemanticHead 3 | from .primitive_head import PrimitiveHead 4 | 5 | __all__ = ['PointwiseSemanticHead', 'PrimitiveHead'] 6 | -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.1.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/original/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.2.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.005.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.01.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.02.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.03.pickle -------------------------------------------------------------------------------- 
/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.06.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.12.pickle -------------------------------------------------------------------------------- /data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TinyDataML/Tiny3D/HEAD/data/data_simulator/integral_lookup_tables/shifted/integral_0m_to_200m_stepsize_0.1m_tau_h_20ns_alpha_0.15.pickle -------------------------------------------------------------------------------- /mmdet3d/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .dgcnn_head import DGCNNHead 3 | from .paconv_head import PAConvHead 4 | from .pointnet2_head import PointNet2Head 5 | 6 | __all__ = ['PointNet2Head', 'DGCNNHead', 'PAConvHead'] 7 | -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/.gitignore: -------------------------------------------------------------------------------- 1 | /data/ 2 | /models/*.pth 3 | /models/*.txt 4 | /__pycache__/ 5 | /logs/ 6 | /noise_removal/results/ 7 | /noise_removal/logs/ 8 | /outliers_removal/results/ 9 | /outliers_removal/logs/ 10 | /noise_removal/*.pyc 11 | /outliers_removal/*.pyc 12 | -------------------------------------------------------------------------------- /mmdet3d/core/visualizer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .show_result import (show_multi_modality_result, show_result, 3 | show_seg_result) 4 | 5 | __all__ = ['show_result', 'show_seg_result', 'show_multi_modality_result'] 6 | -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/noise_removal/run.sh: -------------------------------------------------------------------------------- 1 | python eval_pcpnet.py --nrun 1 --shapename galera100k_noise_white_1.00e-02_{i} 2 | python eval_pcpnet.py --nrun 2 --shapename galera100k_noise_white_1.00e-02_{i} 3 | python eval_pcpnet.py --nrun 3 --shapename galera100k_noise_white_1.00e-02_{i} 4 | -------------------------------------------------------------------------------- /mmdet3d/models/model_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .edge_fusion_module import EdgeFusionModule 3 | from .transformer import GroupFree3DMHA 4 | from .vote_module import VoteModule 5 | 6 | __all__ = ['VoteModule', 'GroupFree3DMHA', 'EdgeFusionModule'] 7 | -------------------------------------------------------------------------------- /mmdet3d/ops/dgcnn_modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .dgcnn_fa_module import DGCNNFAModule 3 | from .dgcnn_fp_module import DGCNNFPModule 4 | from .dgcnn_gf_module import DGCNNGFModule 5 | 6 | __all__ = ['DGCNNFAModule', 'DGCNNFPModule', 'DGCNNGFModule'] 7 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/pretrained/README.md: -------------------------------------------------------------------------------- 1 | # Pretrained Models 2 | 3 | We provide both supervisedly and unsupervisedly pretrained models. We also provide TensorBoard logs of the training process. You may view the logs by running: 4 | 5 | ```bash 6 | cd pretrained 7 | tensorboard --logdir ./ 8 | ``` 9 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/data/README.md: -------------------------------------------------------------------------------- 1 | # Dataset 2 | 3 | The training and testing datasets can be downloaded at: [https://drive.google.com/drive/folders/1Qw_bYqsUcekeh165kgRODwIuYpsYIzM1?usp=sharing](https://drive.google.com/drive/folders/1Qw_bYqsUcekeh165kgRODwIuYpsYIzM1?usp=sharing) 4 | 5 | Extract the two zip files here to use them. 6 | 7 | -------------------------------------------------------------------------------- /mmdet3d/models/middle_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .pillar_scatter import PointPillarsScatter 3 | from .sparse_encoder import SparseEncoder, SparseEncoderSASSD 4 | from .sparse_unet import SparseUNet 5 | 6 | __all__ = [ 7 | 'PointPillarsScatter', 'SparseEncoder', 'SparseEncoderSASSD', 'SparseUNet' 8 | ] 9 | -------------------------------------------------------------------------------- /mmdet3d/models/voxel_encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .pillar_encoder import DynamicPillarFeatureNet, PillarFeatureNet 3 | from .voxel_encoder import DynamicSimpleVFE, DynamicVFE, HardSimpleVFE, HardVFE 4 | 5 | __all__ = [ 6 | 'PillarFeatureNet', 'DynamicPillarFeatureNet', 'HardVFE', 'DynamicVFE', 7 | 'HardSimpleVFE', 'DynamicSimpleVFE' 8 | ] 9 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/ops/emd/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='emd', 6 | ext_modules=[ 7 | CUDAExtension('emd', [ 8 | 'emd.cpp', 9 | 'emd_cuda.cu', 10 | ]), 11 | ], 12 | cmdclass={ 13 | 'build_ext': BuildExtension 14 | }) -------------------------------------------------------------------------------- /mmdet3d/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmdet.models.necks.fpn import FPN 3 | from .dla_neck import DLANeck 4 | from .imvoxel_neck import OutdoorImVoxelNeck 5 | from .pointnet2_fp_neck import PointNetFPNeck 6 | from .second_fpn import SECONDFPN 7 | 8 | __all__ = [ 9 | 'FPN', 'SECONDFPN', 'OutdoorImVoxelNeck', 'PointNetFPNeck', 'DLANeck' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/roi_extractors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor 3 | from .single_roiaware_extractor import Single3DRoIAwareExtractor 4 | from .single_roipoint_extractor import Single3DRoIPointExtractor 5 | 6 | __all__ = [ 7 | 'SingleRoIExtractor', 'Single3DRoIAwareExtractor', 8 | 'Single3DRoIPointExtractor' 9 | ] 10 | -------------------------------------------------------------------------------- /mmdet3d/models/fusion_layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .coord_transform import (apply_3d_transformation, bbox_2d_transform, 3 | coord_2d_transform) 4 | from .point_fusion import PointFusion 5 | from .vote_fusion import VoteFusion 6 | 7 | __all__ = [ 8 | 'PointFusion', 'VoteFusion', 'apply_3d_transformation', 9 | 'bbox_2d_transform', 'coord_2d_transform' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdet3d/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .anchor import * # noqa: F401, F403 3 | from .bbox import * # noqa: F401, F403 4 | from .evaluation import * # noqa: F401, F403 5 | from .points import * # noqa: F401, F403 6 | from .post_processing import * # noqa: F401, F403 7 | from .utils import * # noqa: F401, F403 8 | from .visualizer import * # noqa: F401, F403 9 | from .voxel import * # noqa: F401, F403 10 | -------------------------------------------------------------------------------- /mmdet3d/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .clip_sigmoid import clip_sigmoid 3 | from .edge_indices import get_edge_indices 4 | from .gen_keypoints import get_keypoints 5 | from .handle_objs import filter_outside_objs, handle_proj_objs 6 | from .mlp import MLP 7 | 8 | __all__ = [ 9 | 'clip_sigmoid', 'MLP', 'get_edge_indices', 'filter_outside_objs', 10 | 'handle_proj_objs', 'get_keypoints' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdet3d/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .indoor_eval import indoor_eval 3 | from .instance_seg_eval import instance_seg_eval 4 | from .kitti_utils import kitti_eval, kitti_eval_coco_style 5 | from .lyft_eval import lyft_eval 6 | from .seg_eval import seg_eval 7 | 8 | __all__ = [ 9 | 'kitti_eval_coco_style', 'kitti_eval', 'indoor_eval', 'lyft_eval', 10 | 'seg_eval', 'instance_seg_eval' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdet3d/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .array_converter import ArrayConverter, array_converter 3 | from .gaussian import (draw_heatmap_gaussian, ellip_gaussian2D, gaussian_2d, 4 | gaussian_radius, get_ellip_gaussian_2D) 5 | 6 | __all__ = [ 7 | 'gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian', 8 | 'ArrayConverter', 'array_converter', 'ellip_gaussian2D', 9 | 'get_ellip_gaussian_2D' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdet3d/ops/spconv/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .overwrite_spconv.write_spconv2 import register_spconv2 3 | 4 | try: 5 | import spconv 6 | except ImportError: 7 | IS_SPCONV2_AVAILABLE = False 8 | else: 9 | if hasattr(spconv, '__version__') and spconv.__version__ >= '2.0.0': 10 | IS_SPCONV2_AVAILABLE = register_spconv2() 11 | else: 12 | IS_SPCONV2_AVAILABLE = False 13 | 14 | __all__ = ['IS_SPCONV2_AVAILABLE'] 15 | -------------------------------------------------------------------------------- /mmdet3d/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.core.anchor import build_prior_generator 3 | from .anchor_3d_generator import (AlignedAnchor3DRangeGenerator, 4 | AlignedAnchor3DRangeGeneratorPerCls, 5 | Anchor3DRangeGenerator) 6 | 7 | __all__ = [ 8 | 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator', 9 | 'build_prior_generator', 'AlignedAnchor3DRangeGeneratorPerCls' 10 | ] 11 | -------------------------------------------------------------------------------- /mmdet3d/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import Registry, build_from_cfg, print_log 3 | 4 | from .collect_env import collect_env 5 | from .compat_cfg import compat_cfg 6 | from .logger import get_root_logger 7 | from .misc import find_latest_checkpoint 8 | from .setup_env import setup_multi_processes 9 | 10 | __all__ = [ 11 | 'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env', 12 | 'print_log', 'setup_multi_processes', 'find_latest_checkpoint', 13 | 'compat_cfg' 14 | ] 15 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/iou_calculators/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .iou3d_calculator import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, 3 | BboxOverlapsNearest3D, 4 | axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, 5 | bbox_overlaps_nearest_3d) 6 | 7 | __all__ = [ 8 | 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d', 9 | 'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D', 10 | 'axis_aligned_bbox_overlaps_3d' 11 | ] 12 | -------------------------------------------------------------------------------- /mmdet3d/models/utils/clip_sigmoid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | 5 | def clip_sigmoid(x, eps=1e-4): 6 | """Sigmoid function for input feature. 7 | 8 | Args: 9 | x (torch.Tensor): Input feature map with the shape of [B, N, H, W]. 10 | eps (float, optional): Lower bound of the range to be clamped to. 11 | Defaults to 1e-4. 12 | 13 | Returns: 14 | torch.Tensor: Feature map after sigmoid. 15 | """ 16 | y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) 17 | return y 18 | -------------------------------------------------------------------------------- /mmdet3d/core/voxel/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import mmcv 3 | 4 | from . import voxel_generator 5 | 6 | 7 | def build_voxel_generator(cfg, **kwargs): 8 | """Builder of voxel generator.""" 9 | if isinstance(cfg, voxel_generator.VoxelGenerator): 10 | return cfg 11 | elif isinstance(cfg, dict): 12 | return mmcv.runner.obj_from_dict( 13 | cfg, voxel_generator, default_args=kwargs) 14 | else: 15 | raise TypeError('Invalid type {} for building a sampler'.format( 16 | type(cfg))) 17 | -------------------------------------------------------------------------------- /mmdet3d/ops/pointnet_modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .builder import build_sa_module 3 | from .paconv_sa_module import (PAConvCUDASAModule, PAConvCUDASAModuleMSG, 4 | PAConvSAModule, PAConvSAModuleMSG) 5 | from .point_fp_module import PointFPModule 6 | from .point_sa_module import PointSAModule, PointSAModuleMSG 7 | 8 | __all__ = [ 9 | 'build_sa_module', 'PointSAModuleMSG', 'PointSAModule', 'PointFPModule', 10 | 'PAConvSAModule', 'PAConvSAModuleMSG', 'PAConvCUDASAModule', 11 | 'PAConvCUDASAModuleMSG' 12 | ] 13 | -------------------------------------------------------------------------------- /test/test_data_ops/test_filter.py: -------------------------------------------------------------------------------- 1 | from data.data_filter.data_filter import lidar_filter 2 | import numpy as np 3 | 4 | if __name__ == "__main__": 5 | 6 | lidar_data = {"points": np.random.randn(100, 4)} 7 | lidar_data = lidar_filter( lidar_data, 8 | method = "bilateral_filter", 9 | params = {"radius" : 0.2, 10 | "sigma_d":0.2, 11 | "sigma_n":0.2, 12 | } 13 | ) 14 | print(lidar_data["points"].shape) -------------------------------------------------------------------------------- /mmdet3d/version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 
2 | 3 | __version__ = '1.0.0rc3' 4 | short_version = __version__ 5 | 6 | 7 | def parse_version_info(version_str): 8 | version_info = [] 9 | for x in version_str.split('.'): 10 | if x.isdigit(): 11 | version_info.append(int(x)) 12 | elif x.find('rc') != -1: 13 | patch_version = x.split('rc') 14 | version_info.append(int(patch_version[0])) 15 | version_info.append(f'rc{patch_version[1]}') 16 | return tuple(version_info) 17 | 18 | 19 | version_info = parse_version_info(__version__) 20 | -------------------------------------------------------------------------------- /test/test_data_ops/test_loader.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import unittest 3 | 4 | from data.data_loader.data_loader import lidar_loader 5 | 6 | class TestDataLoader(unittest.TestCase): 7 | 8 | 9 | def test_data_loader_bin(self): 10 | data_loaded=None 11 | lidar_data_path = 'test/data_tobe_tested/nuscenes/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151603547590.pcd.bin' 12 | data_loaded = lidar_loader(lidar_data_path, None, 'pcd.bin') 13 | 14 | print('output.shape') 15 | print(data_loaded.shape) 16 | assert data_loaded is not None 17 | 18 | 19 | if __name__ == '__main__': 20 | unittest.main() 21 | -------------------------------------------------------------------------------- /mmdet3d/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .inference import (convert_SyncBN, inference_detector, 3 | inference_mono_3d_detector, 4 | inference_multi_modality_detector, inference_segmentor, 5 | init_model, show_result_meshlab) 6 | from .test import single_gpu_test 7 | from .train import init_random_seed, train_model 8 | 9 | __all__ = [ 10 | 'inference_detector', 'init_model', 'single_gpu_test', 11 | 'inference_mono_3d_detector', 'show_result_meshlab', 'convert_SyncBN', 12 | 'train_model', 'inference_multi_modality_detector', 'inference_segmentor', 13 | 'init_random_seed' 14 | ] 15 | -------------------------------------------------------------------------------- /mmdet3d/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.models.backbones import SSDVGG, HRNet, ResNet, ResNetV1d, ResNeXt 3 | from .dgcnn import DGCNNBackbone 4 | from .dla import DLANet 5 | from .mink_resnet import MinkResNet 6 | from .multi_backbone import MultiBackbone 7 | from .nostem_regnet import NoStemRegNet 8 | from .pointnet2_sa_msg import PointNet2SAMSG 9 | from .pointnet2_sa_ssg import PointNet2SASSG 10 | from .second import SECOND 11 | 12 | __all__ = [ 13 | 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'NoStemRegNet', 14 | 'SECOND', 'DGCNNBackbone', 'PointNet2SASSG', 'PointNet2SAMSG', 15 | 'MultiBackbone', 'DLANet', 'MinkResNet' 16 | ] 17 | -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_3droi_head import Base3DRoIHead 3 | from .bbox_heads import PartA2BboxHead 4 | from .h3d_roi_head import H3DRoIHead 5 | from .mask_heads import PointwiseSemanticHead, PrimitiveHead 6 | from .part_aggregation_roi_head import PartAggregationROIHead 7 | from .point_rcnn_roi_head import PointRCNNRoIHead 8 | from .roi_extractors import Single3DRoIAwareExtractor, SingleRoIExtractor 9 | 10 | __all__ = [ 11 | 'Base3DRoIHead', 'PartAggregationROIHead', 'PointwiseSemanticHead', 12 | 'Single3DRoIAwareExtractor', 'PartA2BboxHead', 'SingleRoIExtractor', 13 | 'H3DRoIHead', 'PrimitiveHead', 'PointRCNNRoIHead' 14 | ] 15 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.core.bbox.samplers import (BaseSampler, CombinedSampler, 3 | InstanceBalancedPosSampler, 4 | IoUBalancedNegSampler, OHEMSampler, 5 | PseudoSampler, RandomSampler, 6 | SamplingResult) 7 | from .iou_neg_piecewise_sampler import IoUNegPiecewiseSampler 8 | 9 | __all__ = [ 10 | 'BaseSampler', 'PseudoSampler', 'RandomSampler', 11 | 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 12 | 'OHEMSampler', 'SamplingResult', 'IoUNegPiecewiseSampler' 13 | ] 14 | -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/bbox_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.models.roi_heads.bbox_heads import (BBoxHead, ConvFCBBoxHead, 3 | DoubleConvFCBBoxHead, 4 | Shared2FCBBoxHead, 5 | Shared4Conv1FCBBoxHead) 6 | from .h3d_bbox_head import H3DBboxHead 7 | from .parta2_bbox_head import PartA2BboxHead 8 | from .point_rcnn_bbox_head import PointRCNNBboxHead 9 | 10 | __all__ = [ 11 | 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 12 | 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'PartA2BboxHead', 13 | 'H3DBboxHead', 'PointRCNNBboxHead' 14 | ] 15 | -------------------------------------------------------------------------------- /mmdet3d/core/post_processing/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.core.post_processing import (merge_aug_bboxes, merge_aug_masks, 3 | merge_aug_proposals, merge_aug_scores, 4 | multiclass_nms) 5 | from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms, 6 | nms_bev, nms_normal_bev) 7 | from .merge_augs import merge_aug_bboxes_3d 8 | 9 | __all__ = [ 10 | 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 11 | 'merge_aug_scores', 'merge_aug_masks', 'box3d_multiclass_nms', 12 | 'aligned_3d_nms', 'merge_aug_bboxes_3d', 'circle_nms', 'nms_bev', 13 | 'nms_normal_bev' 14 | ] 15 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/smoke_mono3d.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..builder import DETECTORS 3 | from .single_stage_mono3d import SingleStageMono3DDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class SMOKEMono3D(SingleStageMono3DDetector): 8 | r"""SMOKE `_ for monocular 3D object 9 | detection. 
10 | 11 | """ 12 | 13 | def __init__(self, 14 | backbone, 15 | neck, 16 | bbox_head, 17 | train_cfg=None, 18 | test_cfg=None, 19 | pretrained=None): 20 | super(SMOKEMono3D, self).__init__(backbone, neck, bbox_head, train_cfg, 21 | test_cfg, pretrained) 22 | -------------------------------------------------------------------------------- /mmdet3d/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.models.losses import FocalLoss, SmoothL1Loss, binary_cross_entropy 3 | from .axis_aligned_iou_loss import AxisAlignedIoULoss, axis_aligned_iou_loss 4 | from .chamfer_distance import ChamferDistance, chamfer_distance 5 | from .multibin_loss import MultiBinLoss 6 | from .paconv_regularization_loss import PAConvRegularizationLoss 7 | from .uncertain_smooth_l1_loss import UncertainL1Loss, UncertainSmoothL1Loss 8 | 9 | __all__ = [ 10 | 'FocalLoss', 'SmoothL1Loss', 'binary_cross_entropy', 'ChamferDistance', 11 | 'chamfer_distance', 'axis_aligned_iou_loss', 'AxisAlignedIoULoss', 12 | 'PAConvRegularizationLoss', 'UncertainL1Loss', 'UncertainSmoothL1Loss', 13 | 'MultiBinLoss' 14 | ] 15 | -------------------------------------------------------------------------------- /test/test_data_ops/test_visualizor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import unittest 4 | 5 | import data 6 | import data.data_simulator 7 | 8 | from data.data_visualizor.data_visualizor import lidar_visualizor 9 | 10 | 11 | class TestDataVisualizor(unittest.TestCase): 12 | 13 | def test_data_visulizor_open3d(self): 14 | points = torch.randn(2000, 4) 15 | pred_bboxes = torch.randn(2000, 7) 16 | gt_bboxes = torch.randn(2000, 7) 17 | lidar_data = {'points':points, 'pred_bboxes':pred_bboxes, 'gt_bboxes':gt_bboxes} 18 | data_simulated = lidar_visualizor(lidar_data, out_dir='./',filename='test_visualizor', method='open3d') 19 | 20 | assert os.path.exists('./test_visualizor') 21 | 22 | if __name__ == '__main__': 23 | unittest.main() 24 | -------------------------------------------------------------------------------- /mmdet3d/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import collect_env as collect_base_env 3 | from mmcv.utils import get_git_hash 4 | 5 | import mmdet 6 | import mmdet3d 7 | import mmseg 8 | from mmdet3d.ops.spconv import IS_SPCONV2_AVAILABLE 9 | 10 | 11 | def collect_env(): 12 | """Collect the information of the running environments.""" 13 | env_info = collect_base_env() 14 | env_info['MMDetection'] = mmdet.__version__ 15 | env_info['MMSegmentation'] = mmseg.__version__ 16 | env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7] 17 | env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE 18 | return env_info 19 | 20 | 21 | if __name__ == '__main__': 22 | for name, val in collect_env().items(): 23 | print(f'{name}: {val}') 24 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/ssd3dnet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..builder import DETECTORS 3 | from .votenet import VoteNet 4 | 5 | 6 | @DETECTORS.register_module() 7 | class SSD3DNet(VoteNet): 8 | """3DSSDNet model. 
9 | 10 | https://arxiv.org/abs/2002.10187.pdf 11 | """ 12 | 13 | def __init__(self, 14 | backbone, 15 | bbox_head=None, 16 | train_cfg=None, 17 | test_cfg=None, 18 | init_cfg=None, 19 | pretrained=None): 20 | super(SSD3DNet, self).__init__( 21 | backbone=backbone, 22 | bbox_head=bbox_head, 23 | train_cfg=train_cfg, 24 | test_cfg=test_cfg, 25 | init_cfg=init_cfg, 26 | pretrained=pretrained) 27 | -------------------------------------------------------------------------------- /test/test_data_ops/test_denoisor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import unittest 3 | 4 | 5 | import data 6 | import data.data_denoisor 7 | from data.data_denoisor.data_denoisor import lidar_denoisor 8 | 9 | class TestDataDenoisor(unittest.TestCase): 10 | 11 | 12 | # noinspection DuplicatedCode 13 | def test_data_denoisor_dmr(self): 14 | points = torch.randn(20000, 3) 15 | lidar_data = {'points':points} 16 | data_denoised = lidar_denoisor(lidar_data, 'dmr') 17 | assert not data_denoised['points'].equal(points) 18 | 19 | def test_data_denoisor_pcp(self): 20 | points = torch.randn(4, 500, 3) 21 | lidar_data = {'points':points} 22 | data_denoised = lidar_denoisor(lidar_data, 'pcp') 23 | assert not data_denoised['points'].equal(points) 24 | if __name__ == '__main__': 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /test/test_data_ops/test_simulator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import unittest 3 | 4 | from data.data_simulator.data_simulator import lidar_simulator 5 | 6 | 7 | class TestDataSimulator(unittest.TestCase): 8 | 9 | 10 | def test_data_simulator_foggy(self): 11 | points = torch.randn(2000, 4) 12 | lidar_data = {'points':points} 13 | data_simulated = lidar_simulator(lidar_data, 'foggy') 14 | print('=========== test_data_simulator_foggy ===========') 15 | print('input.type') 16 | print(type(points)) 17 | print('output.type') 18 | print(type(data_simulated['points'])) 19 | print('input.dtype') 20 | print(points.dtype) 21 | print('output.dtype') 22 | print(data_simulated['points'].dtype) 23 | 24 | assert not data_simulated['points'].equal(points) 25 | 26 | 27 | if __name__ == '__main__': 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base_box3d import BaseInstance3DBoxes 3 | from .box_3d_mode import Box3DMode 4 | from .cam_box3d import CameraInstance3DBoxes 5 | from .coord_3d_mode import Coord3DMode 6 | from .depth_box3d import DepthInstance3DBoxes 7 | from .lidar_box3d import LiDARInstance3DBoxes 8 | from .utils import (get_box_type, get_proj_mat_by_coord_type, limit_period, 9 | mono_cam_box2vis, points_cam2img, points_img2cam, 10 | rotation_3d_in_axis, xywhr2xyxyr) 11 | 12 | __all__ = [ 13 | 'Box3DMode', 'BaseInstance3DBoxes', 'LiDARInstance3DBoxes', 14 | 'CameraInstance3DBoxes', 'DepthInstance3DBoxes', 'xywhr2xyxyr', 15 | 'get_box_type', 'rotation_3d_in_axis', 'limit_period', 'points_cam2img', 16 | 'points_img2cam', 'Coord3DMode', 'mono_cam_box2vis', 17 | 'get_proj_mat_by_coord_type' 18 | ] 19 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/fcos_mono3d.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..builder import DETECTORS 3 | from .single_stage_mono3d import SingleStageMono3DDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class FCOSMono3D(SingleStageMono3DDetector): 8 | r"""`FCOS3D `_ for monocular 3D object detection. 9 | 10 | Currently please refer to our entry on the 11 | `leaderboard `_. 12 | """ # noqa: E501 13 | 14 | def __init__(self, 15 | backbone, 16 | neck, 17 | bbox_head, 18 | train_cfg=None, 19 | test_cfg=None, 20 | pretrained=None): 21 | super(FCOSMono3D, self).__init__(backbone, neck, bbox_head, train_cfg, 22 | test_cfg, pretrained) 23 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/coders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmdet.core.bbox import build_bbox_coder 3 | from .anchor_free_bbox_coder import AnchorFreeBBoxCoder 4 | from .centerpoint_bbox_coders import CenterPointBBoxCoder 5 | from .delta_xyzwhlr_bbox_coder import DeltaXYZWLHRBBoxCoder 6 | from .fcos3d_bbox_coder import FCOS3DBBoxCoder 7 | from .groupfree3d_bbox_coder import GroupFree3DBBoxCoder 8 | from .monoflex_bbox_coder import MonoFlexCoder 9 | from .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder 10 | from .pgd_bbox_coder import PGDBBoxCoder 11 | from .point_xyzwhlr_bbox_coder import PointXYZWHLRBBoxCoder 12 | from .smoke_bbox_coder import SMOKECoder 13 | 14 | __all__ = [ 15 | 'build_bbox_coder', 'DeltaXYZWLHRBBoxCoder', 'PartialBinBasedBBoxCoder', 16 | 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder', 'GroupFree3DBBoxCoder', 17 | 'PointXYZWHLRBBoxCoder', 'FCOS3DBBoxCoder', 'PGDBBoxCoder', 'SMOKECoder', 18 | 'MonoFlexCoder' 19 | ] 20 | -------------------------------------------------------------------------------- /data/data_qualificator/data_qualificator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from qualificator_utils import find_label_issues 4 | 5 | def lidar_qualificator(lidar_data, pred, method): 6 | """ 7 | Use different data quality assessment methods to assess the quality of lidar data.
8 | Args: 9 | lidar_data: dict 10 | method: str 11 | return: 12 | simulated lidar_data 13 | Reference: 14 | https://github.com/cleanlab/cleanlab/blob/master/cleanlab/classification.py 15 | 16 | """ 17 | points = lidar_dataset['points'] 18 | labels = lidar_dataaet['gt_label'] 19 | # pred_bbox = lidar_data['pred_bbox'] 20 | # gt_bbox = lidar_data['gt_bbox'] 21 | points = points.numpy() 22 | 23 | if method == 'confident_learning': 24 | points, pred_bbox, gt_bbox, issues = find_label_issues(points, labelsm pred) 25 | 26 | lidar_data['points'] = points 27 | lidar_data['pred_bbox'] = pred_bbox 28 | lidar_data['gt_bbox'] = gt_bbox 29 | return lidar_data, issues 30 | -------------------------------------------------------------------------------- /mmdet3d/core/points/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base_points import BasePoints 3 | from .cam_points import CameraPoints 4 | from .depth_points import DepthPoints 5 | from .lidar_points import LiDARPoints 6 | 7 | __all__ = ['BasePoints', 'CameraPoints', 'DepthPoints', 'LiDARPoints'] 8 | 9 | 10 | def get_points_type(points_type): 11 | """Get the class of points according to coordinate type. 12 | 13 | Args: 14 | points_type (str): The type of points coordinate. 15 | The valid value are "CAMERA", "LIDAR", or "DEPTH". 16 | 17 | Returns: 18 | class: Points type. 19 | """ 20 | if points_type == 'CAMERA': 21 | points_cls = CameraPoints 22 | elif points_type == 'LIDAR': 23 | points_cls = LiDARPoints 24 | elif points_type == 'DEPTH': 25 | points_cls = DepthPoints 26 | else: 27 | raise ValueError('Only "points_type" of "CAMERA", "LIDAR", or "DEPTH"' 28 | f' are supported, got {points_type}') 29 | 30 | return points_cls 31 | -------------------------------------------------------------------------------- /model/model_compressor/compressor.py: -------------------------------------------------------------------------------- 1 | import torch.nn.utils.prune as prune 2 | import torch 3 | 4 | def torch_prune(model, prune_list, amount_list): 5 | 6 | for n, module in model.named_modules(): 7 | for j, prune_item in enumerate(prune_list): 8 | # print(j) 9 | # print(prune_item) 10 | if isinstance(module, prune_item): 11 | prune.random_unstructured(module,name = 'weight', amount = amount_list[j]) 12 | prune.remove(module,'weight') 13 | 14 | def dynamic_quant(model): 15 | model.cpu() 16 | torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8) 17 | model.cuda() 18 | 19 | def static_quant(model, input_data): 20 | model.qconfig = torch.quantization.get_default_qconfig('fbgemm') 21 | model_prepared = torch.quantization.prepare(model) 22 | model_prepared(*input_data) 23 | model_prepared.cpu() 24 | model_int8 = torch.quantization.convert(model_prepared, inplace=True) 25 | 26 | return model_int8 27 | # torch_out = model_int8(*input_data) -------------------------------------------------------------------------------- /mmdet3d/models/dense_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .anchor3d_head import Anchor3DHead 3 | from .anchor_free_mono3d_head import AnchorFreeMono3DHead 4 | from .base_conv_bbox_head import BaseConvBboxHead 5 | from .base_mono3d_dense_head import BaseMono3DDenseHead 6 | from .centerpoint_head import CenterHead 7 | from .fcos_mono3d_head import FCOSMono3DHead 8 | from .free_anchor3d_head import FreeAnchor3DHead 9 | from .groupfree3d_head import GroupFree3DHead 10 | from .monoflex_head import MonoFlexHead 11 | from .parta2_rpn_head import PartA2RPNHead 12 | from .pgd_head import PGDHead 13 | from .point_rpn_head import PointRPNHead 14 | from .shape_aware_head import ShapeAwareHead 15 | from .smoke_mono3d_head import SMOKEMono3DHead 16 | from .ssd_3d_head import SSD3DHead 17 | from .vote_head import VoteHead 18 | 19 | __all__ = [ 20 | 'Anchor3DHead', 'FreeAnchor3DHead', 'PartA2RPNHead', 'VoteHead', 21 | 'SSD3DHead', 'BaseConvBboxHead', 'CenterHead', 'ShapeAwareHead', 22 | 'BaseMono3DDenseHead', 'AnchorFreeMono3DHead', 'FCOSMono3DHead', 23 | 'GroupFree3DHead', 'PointRPNHead', 'SMOKEMono3DHead', 'PGDHead', 24 | 'MonoFlexHead' 25 | ] 26 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base import Base3DDetector 3 | from .centerpoint import CenterPoint 4 | from .dynamic_voxelnet import DynamicVoxelNet 5 | from .fcos_mono3d import FCOSMono3D 6 | from .groupfree3dnet import GroupFree3DNet 7 | from .h3dnet import H3DNet 8 | from .imvotenet import ImVoteNet 9 | from .imvoxelnet import ImVoxelNet 10 | from .mvx_faster_rcnn import DynamicMVXFasterRCNN, MVXFasterRCNN 11 | from .mvx_two_stage import MVXTwoStageDetector 12 | from .parta2 import PartA2 13 | from .point_rcnn import PointRCNN 14 | from .sassd import SASSD 15 | from .single_stage_mono3d import SingleStageMono3DDetector 16 | from .smoke_mono3d import SMOKEMono3D 17 | from .ssd3dnet import SSD3DNet 18 | from .votenet import VoteNet 19 | from .voxelnet import VoxelNet 20 | 21 | __all__ = [ 22 | 'Base3DDetector', 'VoxelNet', 'DynamicVoxelNet', 'MVXTwoStageDetector', 23 | 'DynamicMVXFasterRCNN', 'MVXFasterRCNN', 'PartA2', 'VoteNet', 'H3DNet', 24 | 'CenterPoint', 'SSD3DNet', 'ImVoteNet', 'SingleStageMono3DDetector', 25 | 'FCOSMono3D', 'ImVoxelNet', 'GroupFree3DNet', 'PointRCNN', 'SMOKEMono3D', 26 | 'SASSD' 27 | ] 28 | -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/noise_removal/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # quaternion a + bi + cj + dk should be given in the form [a,b,c,d] 4 | def batch_quat_to_rotmat(q, out=None): 5 | 6 | batchsize = q.size(0) 7 | 8 | if out is None: 9 | out = torch.FloatTensor(batchsize, 3, 3) 10 | 11 | # 2 / squared quaternion 2-norm 12 | s = 2/torch.sum(q.pow(2), 1) 13 | 14 | # coefficients of the Hamilton product of the quaternion with itself 15 | h = torch.bmm(q.unsqueeze(2), q.unsqueeze(1)) 16 | 17 | out[:, 0, 0] = 1 - (h[:, 2, 2] + h[:, 3, 3]).mul(s) 18 | out[:, 0, 1] = (h[:, 1, 2] - h[:, 3, 0]).mul(s) 19 | out[:, 0, 2] = (h[:, 1, 3] + h[:, 2, 0]).mul(s) 20 | 21 | out[:, 1, 0] = (h[:, 1, 2] + h[:, 3, 0]).mul(s) 22 | out[:, 1, 1] = 1 - (h[:, 1, 1] + h[:, 3, 3]).mul(s) 23 | out[:, 1, 2] = (h[:, 2, 3] - h[:, 1, 0]).mul(s) 24 | 25 | out[:, 2, 0] = (h[:, 1, 3] - h[:, 2, 0]).mul(s) 26 | out[:, 2, 1] = (h[:, 2, 3] + 
h[:, 1, 0]).mul(s) 27 | out[:, 2, 2] = 1 - (h[:, 1, 1] + h[:, 2, 2]).mul(s) 28 | 29 | return out 30 | 31 | def cos_angle(v1, v2): 32 | 33 | return torch.bmm(v1.unsqueeze(1), v2.unsqueeze(2)).view(-1) / torch.clamp(v1.norm(2, 1) * v2.norm(2, 1), min=0.000001) 34 | -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/outliers_removal/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # quaternion a + bi + cj + dk should be given in the form [a,b,c,d] 4 | def batch_quat_to_rotmat(q, out=None): 5 | 6 | batchsize = q.size(0) 7 | 8 | if out is None: 9 | out = torch.FloatTensor(batchsize, 3, 3) 10 | 11 | # 2 / squared quaternion 2-norm 12 | s = 2/torch.sum(q.pow(2), 1) 13 | 14 | # coefficients of the Hamilton product of the quaternion with itself 15 | h = torch.bmm(q.unsqueeze(2), q.unsqueeze(1)) 16 | 17 | out[:, 0, 0] = 1 - (h[:, 2, 2] + h[:, 3, 3]).mul(s) 18 | out[:, 0, 1] = (h[:, 1, 2] - h[:, 3, 0]).mul(s) 19 | out[:, 0, 2] = (h[:, 1, 3] + h[:, 2, 0]).mul(s) 20 | 21 | out[:, 1, 0] = (h[:, 1, 2] + h[:, 3, 0]).mul(s) 22 | out[:, 1, 1] = 1 - (h[:, 1, 1] + h[:, 3, 3]).mul(s) 23 | out[:, 1, 2] = (h[:, 2, 3] - h[:, 1, 0]).mul(s) 24 | 25 | out[:, 2, 0] = (h[:, 1, 3] - h[:, 2, 0]).mul(s) 26 | out[:, 2, 1] = (h[:, 2, 3] + h[:, 1, 0]).mul(s) 27 | out[:, 2, 2] = 1 - (h[:, 1, 1] + h[:, 2, 2]).mul(s) 28 | 29 | return out 30 | 31 | def cos_angle(v1, v2): 32 | 33 | return torch.bmm(v1.unsqueeze(1), v2.unsqueeze(2)).view(-1) / torch.clamp(v1.norm(2, 1) * v2.norm(2, 1), min=0.000001) 34 | -------------------------------------------------------------------------------- /mmdet3d/utils/logger.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import logging 3 | 4 | from mmcv.utils import get_logger 5 | 6 | 7 | def get_root_logger(log_file=None, log_level=logging.INFO, name='mmdet3d'): 8 | """Get root logger and add a keyword filter to it. 9 | 10 | The logger will be initialized if it has not been initialized. By default a 11 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 12 | also be added. The name of the root logger is the top-level package name, 13 | e.g., "mmdet3d". 14 | 15 | Args: 16 | log_file (str, optional): File path of log. Defaults to None. 17 | log_level (int, optional): The level of logger. 18 | Defaults to logging.INFO. 19 | name (str, optional): The name of the root logger, also used as a 20 | filter keyword. Defaults to 'mmdet3d'. 21 | 22 | Returns: 23 | :obj:`logging.Logger`: The obtained logger 24 | """ 25 | logger = get_logger(name=name, log_file=log_file, log_level=log_level) 26 | 27 | # add a logging filter 28 | logging_filter = logging.Filter(name) 29 | logging_filter.filter = lambda record: record.find(name) != -1 30 | 31 | return logger 32 | -------------------------------------------------------------------------------- /data/data_loader/data_loader.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import open3d as o3d 3 | # import laspy 4 | # import pcl 5 | 6 | def lidar_loader(lidar_data_path, lidar_dataset_path = None, type_name = None): 7 | """ 8 | Use different lidar load methods to load lidar data. 
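Currently 'pcd.bin' style binaries (e.g. KITTI / nuScenes dumps) are read with numpy, while '.xyz', '.xyzn', '.xyzrgb', '.pts', '.ply' and '.pcd' files are read through Open3D; see the branches below.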
9 | Args: 10 | lidar_data_path : str 11 | lidar_dataset_path : str 12 | type_name: str 13 | return: 14 | lidar_data : dict 15 | """ 16 | if not lidar_dataset_path: 17 | 18 | if type_name == 'pcd.bin': 19 | points = np.fromfile(lidar_data_path, dtype=np.float32, count=-1).reshape([-1, 4]) 20 | 21 | if type_name in ('.xyz', '.xyzn', '.xyzrgb', '.pts', '.ply', '.pcd'): 22 | # formats readable by Open3D 23 | pcd = o3d.io.read_point_cloud(lidar_data_path) 24 | points = np.asarray(pcd.points) 25 | 26 | # if type_name == 'laspy': 27 | # data = laspy.read(lidar_data_path) 28 | # points = np.vstack((data.X, data.Y, data.Z)).transpose() 29 | 30 | # if method_name == 'pcl': 31 | # points = pcl.load(lidar_data_path) 32 | 33 | lidar_data = dict() 34 | lidar_data["points"] = points 35 | return lidar_data 36 | -------------------------------------------------------------------------------- /data/data_simulator/data_simulator.py: -------------------------------------------------------------------------------- 1 | #import pylisa 2 | import numpy as np 3 | import torch 4 | 5 | from .simulator_fog_utils import ParameterSet, simulate_fog 6 | 7 | def lidar_simulator(lidar_data, method): 8 | """ 9 | Use different extreme weather lidar simulation methods to augment lidar data. 10 | Args: 11 | lidar_data: dict 12 | method: str 13 | return: 14 | simulated lidar_data 15 | Reference: 16 | https://github.com/velatkilic/LISA 17 | """ 18 | points_original = lidar_data['points'] 19 | points = points_original.numpy() 20 | 21 | if method == 'rainy': 22 | import pylisa 23 | lidar = pylisa.Lidar() # lidar object 24 | water = pylisa.Water() # material object 25 | rain = pylisa.MarshallPalmerRain() # particle distribution model 26 | 27 | augm = pylisa.Lisa(lidar, water, rain) 28 | 29 | pcnew = augm.augment(points, 30) # for a rain rate of 30 mm/hr 30 | points = augm.augment(pcnew) 31 | 32 | 33 | if method == "foggy": 34 | parameter_set = ParameterSet(alpha=0.5, gamma=0.000001) 35 | 36 | points, _, _ = simulate_fog(parameter_set, points, 10) 37 | 38 | lidar_data['points'] = torch.from_numpy(points).to(lidar_data['points'].device).type_as(points_original) 39 | 40 | return lidar_data 41 | -------------------------------------------------------------------------------- /mmdet3d/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import glob 3 | import os.path as osp 4 | import warnings 5 | 6 | 7 | def find_latest_checkpoint(path, suffix='pth'): 8 | """Find the latest checkpoint from the working directory. This function is 9 | copied from mmdetection. 10 | 11 | Args: 12 | path(str): The path to find checkpoints. 13 | suffix(str): File extension. 14 | Defaults to pth. 15 | 16 | Returns: 17 | latest_path(str | None): File path of the latest checkpoint. 18 | References: 19 | ..
[1] https://github.com/microsoft/SoftTeacher 20 | /blob/main/ssod/utils/patch.py 21 | """ 22 | if not osp.exists(path): 23 | warnings.warn('The path of checkpoints does not exist.') 24 | return None 25 | if osp.exists(osp.join(path, f'latest.{suffix}')): 26 | return osp.join(path, f'latest.{suffix}') 27 | 28 | checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) 29 | if len(checkpoints) == 0: 30 | warnings.warn('There are no checkpoints in the path.') 31 | return None 32 | latest = -1 33 | latest_path = None 34 | for checkpoint in checkpoints: 35 | count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) 36 | if count > latest: 37 | latest = count 38 | latest_path = checkpoint 39 | return latest_path 40 | -------------------------------------------------------------------------------- /mmdet3d/ops/pointnet_modules/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.utils import Registry 3 | 4 | SA_MODULES = Registry('point_sa_module') 5 | 6 | 7 | def build_sa_module(cfg, *args, **kwargs): 8 | """Build PointNet2 set abstraction (SA) module. 9 | 10 | Args: 11 | cfg (None or dict): The SA module config, which should contain: 12 | - type (str): Module type. 13 | - module args: Args needed to instantiate an SA module. 14 | args (argument list): Arguments passed to the `__init__` 15 | method of the corresponding module. 16 | kwargs (keyword arguments): Keyword arguments passed to the `__init__` 17 | method of the corresponding SA module . 18 | 19 | Returns: 20 | nn.Module: Created SA module. 21 | """ 22 | if cfg is None: 23 | cfg_ = dict(type='PointSAModule') 24 | else: 25 | if not isinstance(cfg, dict): 26 | raise TypeError('cfg must be a dict') 27 | if 'type' not in cfg: 28 | raise KeyError('the cfg dict must contain the key "type"') 29 | cfg_ = cfg.copy() 30 | 31 | module_type = cfg_.pop('type') 32 | if module_type not in SA_MODULES: 33 | raise KeyError(f'Unrecognized module type {module_type}') 34 | else: 35 | sa_module = SA_MODULES.get(module_type) 36 | 37 | module = sa_module(*args, **kwargs, **cfg_) 38 | 39 | return module 40 | -------------------------------------------------------------------------------- /mmdet3d/models/backbones/base_pointnet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import warnings 3 | from abc import ABCMeta 4 | 5 | from mmcv.runner import BaseModule 6 | 7 | 8 | class BasePointNet(BaseModule, metaclass=ABCMeta): 9 | """Base class for PointNet.""" 10 | 11 | def __init__(self, init_cfg=None, pretrained=None): 12 | super(BasePointNet, self).__init__(init_cfg) 13 | self.fp16_enabled = False 14 | assert not (init_cfg and pretrained), \ 15 | 'init_cfg and pretrained cannot be setting at the same time' 16 | if isinstance(pretrained, str): 17 | warnings.warn('DeprecationWarning: pretrained is a deprecated, ' 18 | 'please use "init_cfg" instead') 19 | self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) 20 | 21 | @staticmethod 22 | def _split_point_feats(points): 23 | """Split coordinates and features of input points. 24 | 25 | Args: 26 | points (torch.Tensor): Point coordinates with features, 27 | with shape (B, N, 3 + input_feature_dim). 28 | 29 | Returns: 30 | torch.Tensor: Coordinates of input points. 31 | torch.Tensor: Features of input points. 
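Example (illustrative shapes only, assuming a 4-dim input point):
    >>> points = torch.rand(2, 1024, 4)  # (B, N, 3 + 1 feature)
    >>> xyz, features = BasePointNet._split_point_feats(points)
    >>> xyz.shape
    torch.Size([2, 1024, 3])
    >>> features.shape
    torch.Size([2, 1, 1024])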
32 | """ 33 | xyz = points[..., 0:3].contiguous() 34 | if points.size(-1) > 3: 35 | features = points[..., 3:].transpose(1, 2).contiguous() 36 | else: 37 | features = None 38 | 39 | return xyz, features 40 | -------------------------------------------------------------------------------- /test/test_engine/test_engine.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import unittest 3 | 4 | from deephub.detection_model import Pointpillars 5 | from model.model_deployor.deployor_utils import create_input 6 | from engine.pointpillars_engine import Pointpillars_engine 7 | 8 | pretrain_model = 'checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' 9 | pcd = 'test/data_tobe_tested/kitti/kitti_000008.bin' 10 | device = 'cuda:0' 11 | 12 | 13 | class TestEngine(unittest.TestCase): 14 | 15 | @classmethod 16 | def setUpClass(cls): 17 | data, model_inputs = create_input(pcd, 'kitti', 'pointpillars', 18 | device) 19 | cls.data = data 20 | cls.model_inputs = model_inputs 21 | torch_model = Pointpillars() 22 | cls.torch_model = torch_model 23 | 24 | # noinspection DuplicatedCode 25 | def test_engine_infer(self): 26 | # warp engine 27 | model = Pointpillars_engine(self.torch_model) 28 | # load pretrain model 29 | checkpoint = torch.load(pretrain_model) 30 | model.torch_model.load_state_dict(checkpoint["state_dict"]) 31 | print('loading pretrain from: ' + pretrain_model) 32 | 33 | # engine inference 34 | model.cuda() 35 | model.eval() 36 | 37 | predict = model(self.data['img_metas'][0], self.data['points'][0]) 38 | 39 | # test 40 | assert len(predict['scores_3d']) != 0 41 | 42 | 43 | 44 | 45 | if __name__ == '__main__': 46 | unittest.main() -------------------------------------------------------------------------------- /mmdet3d/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .backbones import * # noqa: F401,F403 3 | from .builder import (BACKBONES, DETECTORS, FUSION_LAYERS, HEADS, LOSSES, 4 | MIDDLE_ENCODERS, NECKS, ROI_EXTRACTORS, SEGMENTORS, 5 | SHARED_HEADS, VOXEL_ENCODERS, build_backbone, 6 | build_detector, build_fusion_layer, build_head, 7 | build_loss, build_middle_encoder, build_model, 8 | build_neck, build_roi_extractor, build_shared_head, 9 | build_voxel_encoder) 10 | from .decode_heads import * # noqa: F401,F403 11 | from .dense_heads import * # noqa: F401,F403 12 | from .detectors import * # noqa: F401,F403 13 | from .fusion_layers import * # noqa: F401,F403 14 | from .losses import * # noqa: F401,F403 15 | from .middle_encoders import * # noqa: F401,F403 16 | from .model_utils import * # noqa: F401,F403 17 | from .necks import * # noqa: F401,F403 18 | from .roi_heads import * # noqa: F401,F403 19 | from .segmentors import * # noqa: F401,F403 20 | from .voxel_encoders import * # noqa: F401,F403 21 | 22 | __all__ = [ 23 | 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 24 | 'DETECTORS', 'SEGMENTORS', 'VOXEL_ENCODERS', 'MIDDLE_ENCODERS', 25 | 'FUSION_LAYERS', 'build_backbone', 'build_neck', 'build_roi_extractor', 26 | 'build_shared_head', 'build_head', 'build_loss', 'build_detector', 27 | 'build_fusion_layer', 'build_model', 'build_middle_encoder', 28 | 'build_voxel_encoder' 29 | ] 30 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/ops/emd/emd.cpp: -------------------------------------------------------------------------------- 1 | // EMD approximation module (based on auction algorithm) 2 | // author: Minghua Liu 3 | #include 4 | #include 5 | 6 | int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price, 7 | at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments, 8 | at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters); 9 | 10 | int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx); 11 | 12 | 13 | 14 | int emd_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price, 15 | at::Tensor assignment_inv, at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments, 16 | at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) { 17 | return emd_cuda_forward(xyz1, xyz2, dist, assignment, price, assignment_inv, bid, bid_increments, max_increments, unass_idx, unass_cnt, unass_cnt_sum, cnt_tmp, max_idx, eps, iters); 18 | } 19 | 20 | int emd_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx) { 21 | 22 | return emd_cuda_backward(xyz1, xyz2, gradxyz, graddist, idx); 23 | } 24 | 25 | 26 | 27 | 28 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 29 | m.def("forward", &emd_forward, "emd forward (CUDA)"); 30 | m.def("backward", &emd_backward, "emd backward (CUDA)"); 31 | } -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | 4 | import numpy as np 5 | import random 6 | 7 | from pytorch_lightning import Trainer 8 | from pytorch_lightning.callbacks import ModelCheckpoint 9 | from argparse import 
ArgumentParser 10 | 11 | from models.denoise import PointCloudDenoising 12 | 13 | def main(hparams): 14 | 15 | torch.manual_seed(hparams.seed) 16 | torch.backends.cudnn.deterministic = True 17 | torch.backends.cudnn.benchmark = False 18 | np.random.seed(hparams.seed) 19 | random.seed(hparams.seed) 20 | 21 | module = PointCloudDenoising(hparams) 22 | 23 | if hparams.debug: 24 | trainer = Trainer( 25 | gpus=hparams.n_gpu, 26 | fast_dev_run=True, 27 | logger=False, 28 | checkpoint_callback=False, 29 | distributed_backend='dp' 30 | ) 31 | else: 32 | trainer = Trainer( 33 | gpus=hparams.n_gpu, 34 | early_stop_callback=None, 35 | distributed_backend='dp', 36 | ) 37 | os.makedirs('./lightning_logs', exist_ok=True) 38 | os.makedirs(trainer.logger.log_dir) 39 | trainer.checkpoint_callback = ModelCheckpoint( 40 | filepath = trainer.logger.log_dir, 41 | save_top_k=-1 42 | ) 43 | 44 | trainer.fit(module) 45 | 46 | if __name__ == '__main__': 47 | parser = ArgumentParser(add_help=False) 48 | parser.add_argument('--debug', action='store_true') 49 | parser.add_argument('--n_gpu', type=int, default=1) 50 | parser.add_argument('--seed', type=int, default=2020) 51 | parser = PointCloudDenoising.add_model_specific_args(parser) 52 | 53 | # parse params 54 | hparams = parser.parse_args() 55 | 56 | main(hparams) 57 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner 3 | from .coders import DeltaXYZWLHRBBoxCoder 4 | # from .bbox_target import bbox_target 5 | from .iou_calculators import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, 6 | BboxOverlapsNearest3D, 7 | axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, 8 | bbox_overlaps_nearest_3d) 9 | from .samplers import (BaseSampler, CombinedSampler, 10 | InstanceBalancedPosSampler, IoUBalancedNegSampler, 11 | PseudoSampler, RandomSampler, SamplingResult) 12 | from .structures import (BaseInstance3DBoxes, Box3DMode, CameraInstance3DBoxes, 13 | Coord3DMode, DepthInstance3DBoxes, 14 | LiDARInstance3DBoxes, get_box_type, limit_period, 15 | mono_cam_box2vis, points_cam2img, points_img2cam, 16 | xywhr2xyxyr) 17 | from .transforms import bbox3d2result, bbox3d2roi, bbox3d_mapping_back 18 | 19 | __all__ = [ 20 | 'BaseSampler', 'AssignResult', 'BaseAssigner', 'MaxIoUAssigner', 21 | 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', 22 | 'IoUBalancedNegSampler', 'CombinedSampler', 'SamplingResult', 23 | 'DeltaXYZWLHRBBoxCoder', 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 24 | 'bbox_overlaps_nearest_3d', 'bbox_overlaps_3d', 25 | 'AxisAlignedBboxOverlaps3D', 'axis_aligned_bbox_overlaps_3d', 'Box3DMode', 26 | 'LiDARInstance3DBoxes', 'CameraInstance3DBoxes', 'bbox3d2roi', 27 | 'bbox3d2result', 'DepthInstance3DBoxes', 'BaseInstance3DBoxes', 28 | 'bbox3d_mapping_back', 'xywhr2xyxyr', 'limit_period', 'points_cam2img', 29 | 'points_img2cam', 'get_box_type', 'Coord3DMode', 'mono_cam_box2vis' 30 | ] 31 | -------------------------------------------------------------------------------- /deephub/denoisy_model/pcp/models/download_models.py: -------------------------------------------------------------------------------- 1 | import os 2 | #import zipfile 3 | import urllib 4 | import tarfile 5 | import argparse 6 | 7 | 8 | def parse_arguments(): 9 | parser = argparse.ArgumentParser() 10 | # naming / file handling 11 | 
parser.add_argument( 12 | '--task', type=str, default='denoising', help='task name for dataset') 13 | return parser.parse_args() 14 | 15 | def download_model(source_url, target_dir, target_file): 16 | global downloaded 17 | downloaded = 0 18 | def show_progress(count, block_size, total_size): 19 | global downloaded 20 | downloaded += block_size 21 | print('downloading ... %d%%' % round(((downloaded*100.0) / total_size))) 22 | 23 | print('downloading ... ') 24 | urllib.urlretrieve(source_url, filename=target_file, reporthook=show_progress) 25 | print('downloading ... done') 26 | 27 | print('extracting ...') 28 | tar = tarfile.open(target_file, "r:gz") 29 | tar.extractall() 30 | tar.close() 31 | os.remove(target_file) 32 | print('extracting ... done') 33 | 34 | 35 | if __name__ == '__main__': 36 | opt = parse_arguments() 37 | if opt.task == "denoising": 38 | source_url = 'http://geometry.cs.ucl.ac.uk/projects/2019/pointcleannet/data/denoisingModel.tar.gz' 39 | target_dir = os.path.dirname(os.path.abspath(__file__)) 40 | target_file = os.path.join(target_dir, 'denoisingModel.tar.gz') 41 | download_model(source_url, target_dir, target_file) 42 | elif opt.task == "outliers_removal": 43 | source_url = 'http://geometry.cs.ucl.ac.uk/projects/2019/pointcleannet/data/outliersRemovalModel.tar.gz' 44 | target_dir = os.path.dirname(os.path.abspath(__file__)) 45 | target_file = os.path.join(target_dir, 'outliersRemovalModel.tar.gz') 46 | download_model(source_url, target_dir, target_file) 47 | else: 48 | print('unknown model') 49 | -------------------------------------------------------------------------------- /mmdet3d/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import mmcv 3 | 4 | import mmdet 5 | import mmseg 6 | from .version import __version__, short_version 7 | 8 | 9 | def digit_version(version_str): 10 | digit_version = [] 11 | for x in version_str.split('.'): 12 | if x.isdigit(): 13 | digit_version.append(int(x)) 14 | elif x.find('rc') != -1: 15 | patch_version = x.split('rc') 16 | digit_version.append(int(patch_version[0]) - 1) 17 | digit_version.append(int(patch_version[1])) 18 | return digit_version 19 | 20 | 21 | mmcv_minimum_version = '1.4.8' 22 | mmcv_maximum_version = '1.6.0' 23 | mmcv_version = digit_version(mmcv.__version__) 24 | 25 | 26 | assert (mmcv_version >= digit_version(mmcv_minimum_version) 27 | and mmcv_version <= digit_version(mmcv_maximum_version)), \ 28 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 29 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' 30 | 31 | mmdet_minimum_version = '2.24.0' 32 | mmdet_maximum_version = '3.0.0' 33 | mmdet_version = digit_version(mmdet.__version__) 34 | assert (mmdet_version >= digit_version(mmdet_minimum_version) 35 | and mmdet_version <= digit_version(mmdet_maximum_version)), \ 36 | f'MMDET=={mmdet.__version__} is used but incompatible. ' \ 37 | f'Please install mmdet>={mmdet_minimum_version}, ' \ 38 | f'<={mmdet_maximum_version}.' 39 | 40 | mmseg_minimum_version = '0.20.0' 41 | mmseg_maximum_version = '1.0.0' 42 | mmseg_version = digit_version(mmseg.__version__) 43 | assert (mmseg_version >= digit_version(mmseg_minimum_version) 44 | and mmseg_version <= digit_version(mmseg_maximum_version)), \ 45 | f'MMSEG=={mmseg.__version__} is used but incompatible. ' \ 46 | f'Please install mmseg>={mmseg_minimum_version}, ' \ 47 | f'<={mmseg_maximum_version}.' 
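# Illustration of digit_version above (the values follow directly from the parsing logic):
#   digit_version('1.5.2')     -> [1, 5, 2]
#   digit_version('2.24.0rc1') -> [2, 24, -1, 1]  # an 'rc' pre-release sorts below the final release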
48 | 49 | __all__ = ['__version__', 'short_version'] 50 | -------------------------------------------------------------------------------- /mmdet3d/models/utils/mlp.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn import ConvModule 3 | from mmcv.runner import BaseModule 4 | from torch import nn as nn 5 | 6 | 7 | class MLP(BaseModule): 8 | """A simple MLP module. 9 | 10 | Pass features (B, C, N) through an MLP. 11 | 12 | Args: 13 | in_channels (int, optional): Number of channels of input features. 14 | Default: 18. 15 | conv_channels (tuple[int], optional): Out channels of the convolution. 16 | Default: (256, 256). 17 | conv_cfg (dict, optional): Config of convolution. 18 | Default: dict(type='Conv1d'). 19 | norm_cfg (dict, optional): Config of normalization. 20 | Default: dict(type='BN1d'). 21 | act_cfg (dict, optional): Config of activation. 22 | Default: dict(type='ReLU'). 23 | """ 24 | 25 | def __init__(self, 26 | in_channel=18, 27 | conv_channels=(256, 256), 28 | conv_cfg=dict(type='Conv1d'), 29 | norm_cfg=dict(type='BN1d'), 30 | act_cfg=dict(type='ReLU'), 31 | init_cfg=None): 32 | super().__init__(init_cfg=init_cfg) 33 | self.mlp = nn.Sequential() 34 | prev_channels = in_channel 35 | for i, conv_channel in enumerate(conv_channels): 36 | self.mlp.add_module( 37 | f'layer{i}', 38 | ConvModule( 39 | prev_channels, 40 | conv_channels[i], 41 | 1, 42 | padding=0, 43 | conv_cfg=conv_cfg, 44 | norm_cfg=norm_cfg, 45 | act_cfg=act_cfg, 46 | bias=True, 47 | inplace=True)) 48 | prev_channels = conv_channels[i] 49 | 50 | def forward(self, img_features): 51 | return self.mlp(img_features) 52 | -------------------------------------------------------------------------------- /mmdet3d/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .compose import Compose 3 | from .dbsampler import DataBaseSampler 4 | from .formating import Collect3D, DefaultFormatBundle, DefaultFormatBundle3D 5 | from .loading import (LoadAnnotations3D, LoadImageFromFileMono3D, 6 | LoadMultiViewImageFromFiles, LoadPointsFromDict, 7 | LoadPointsFromFile, LoadPointsFromMultiSweeps, 8 | NormalizePointsColor, PointSegClassMapping) 9 | from .test_time_aug import MultiScaleFlipAug3D 10 | # yapf: disable 11 | from .transforms_3d import (AffineResize, BackgroundPointsFilter, 12 | GlobalAlignment, GlobalRotScaleTrans, 13 | IndoorPatchPointSample, IndoorPointSample, 14 | MultiViewWrapper, ObjectNameFilter, ObjectNoise, 15 | ObjectRangeFilter, ObjectSample, PointSample, 16 | PointShuffle, PointsRangeFilter, 17 | RandomDropPointsColor, RandomFlip3D, 18 | RandomJitterPoints, RandomRotate, RandomShiftScale, 19 | RangeLimitedRandomCrop, VoxelBasedPointSampler) 20 | 21 | __all__ = [ 22 | 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', 23 | 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D', 24 | 'Compose', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile', 25 | 'DefaultFormatBundle', 'DefaultFormatBundle3D', 'DataBaseSampler', 26 | 'NormalizePointsColor', 'LoadAnnotations3D', 'IndoorPointSample', 27 | 'PointSample', 'PointSegClassMapping', 'MultiScaleFlipAug3D', 28 | 'LoadPointsFromMultiSweeps', 'BackgroundPointsFilter', 29 | 'VoxelBasedPointSampler', 'GlobalAlignment', 'IndoorPatchPointSample', 30 | 'LoadImageFromFileMono3D', 'ObjectNameFilter', 'RandomDropPointsColor', 31 | 'RandomJitterPoints', 'AffineResize', 'RandomShiftScale', 32 | 'LoadPointsFromDict', 'MultiViewWrapper', 'RandomRotate', 33 | 'RangeLimitedRandomCrop' 34 | ] 35 | -------------------------------------------------------------------------------- /mmdet3d/datasets/pipelines/.ipynb_checkpoints/__init__-checkpoint.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .compose import Compose 3 | from .dbsampler import DataBaseSampler 4 | from .formating import Collect3D, DefaultFormatBundle, DefaultFormatBundle3D 5 | from .loading import (LoadAnnotations3D, LoadImageFromFileMono3D, 6 | LoadMultiViewImageFromFiles, LoadPointsFromDict, 7 | LoadPointsFromFile, LoadPointsFromMultiSweeps, 8 | NormalizePointsColor, PointSegClassMapping) 9 | from .test_time_aug import MultiScaleFlipAug3D 10 | # yapf: disable 11 | from .transforms_3d import (AffineResize, BackgroundPointsFilter, 12 | GlobalAlignment, GlobalRotScaleTrans, 13 | IndoorPatchPointSample, IndoorPointSample, 14 | MultiViewWrapper, ObjectNameFilter, ObjectNoise, 15 | ObjectRangeFilter, ObjectSample, PointSample, 16 | PointShuffle, PointsRangeFilter, 17 | RandomDropPointsColor, RandomFlip3D, 18 | RandomJitterPoints, RandomRotate, RandomShiftScale, 19 | RangeLimitedRandomCrop, VoxelBasedPointSampler) 20 | 21 | __all__ = [ 22 | 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', 23 | 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D', 24 | 'Compose', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile', 25 | 'DefaultFormatBundle', 'DefaultFormatBundle3D', 'DataBaseSampler', 26 | 'NormalizePointsColor', 'LoadAnnotations3D', 'IndoorPointSample', 27 | 'PointSample', 'PointSegClassMapping', 'MultiScaleFlipAug3D', 28 | 'LoadPointsFromMultiSweeps', 'BackgroundPointsFilter', 29 | 'VoxelBasedPointSampler', 'GlobalAlignment', 'IndoorPatchPointSample', 30 | 'LoadImageFromFileMono3D', 'ObjectNameFilter', 'RandomDropPointsColor', 31 | 'RandomJitterPoints', 'AffineResize', 'RandomShiftScale', 32 | 'LoadPointsFromDict', 'MultiViewWrapper', 'RandomRotate', 33 | 'RangeLimitedRandomCrop' 34 | ] 35 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/two_stage.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import warnings 3 | 4 | from mmdet.models import TwoStageDetector 5 | from ..builder import DETECTORS, build_backbone, build_head, build_neck 6 | from .base import Base3DDetector 7 | 8 | 9 | @DETECTORS.register_module() 10 | class TwoStage3DDetector(Base3DDetector, TwoStageDetector): 11 | """Base class of two-stage 3D detector. 12 | 13 | It inherits original ``:class:TwoStageDetector`` and 14 | ``:class:Base3DDetector``. This class could serve as a base class for all 15 | two-stage 3D detectors. 
16 | """ 17 | 18 | def __init__(self, 19 | backbone, 20 | neck=None, 21 | rpn_head=None, 22 | roi_head=None, 23 | train_cfg=None, 24 | test_cfg=None, 25 | pretrained=None, 26 | init_cfg=None): 27 | super(TwoStageDetector, self).__init__(init_cfg) 28 | if pretrained: 29 | warnings.warn('DeprecationWarning: pretrained is deprecated, ' 30 | 'please use "init_cfg" instead') 31 | backbone.pretrained = pretrained 32 | self.backbone = build_backbone(backbone) 33 | self.train_cfg = train_cfg 34 | self.test_cfg = test_cfg 35 | if neck is not None: 36 | self.neck = build_neck(neck) 37 | 38 | if rpn_head is not None: 39 | rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None 40 | rpn_head_ = rpn_head.copy() 41 | rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) 42 | self.rpn_head = build_head(rpn_head_) 43 | 44 | if roi_head is not None: 45 | # update train and test cfg here for now 46 | # TODO: refactor assigner & sampler 47 | rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None 48 | roi_head.update(train_cfg=rcnn_train_cfg) 49 | roi_head.update(test_cfg=test_cfg.rcnn) 50 | roi_head.pretrained = pretrained 51 | self.roi_head = build_head(roi_head) 52 | -------------------------------------------------------------------------------- /mmdet3d/ops/dgcnn_modules/dgcnn_fp_module.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn import ConvModule 3 | from mmcv.runner import BaseModule, force_fp32 4 | from torch import nn as nn 5 | 6 | 7 | class DGCNNFPModule(BaseModule): 8 | """Point feature propagation module used in DGCNN. 9 | 10 | Propagate the features from one set to another. 11 | 12 | Args: 13 | mlp_channels (list[int]): List of mlp channels. 14 | norm_cfg (dict, optional): Type of activation method. 15 | Defaults to dict(type='BN1d'). 16 | act_cfg (dict, optional): Type of activation method. 17 | Defaults to dict(type='ReLU'). 18 | init_cfg (dict, optional): Initialization config. Defaults to None. 19 | """ 20 | 21 | def __init__(self, 22 | mlp_channels, 23 | norm_cfg=dict(type='BN1d'), 24 | act_cfg=dict(type='ReLU'), 25 | init_cfg=None): 26 | super().__init__(init_cfg=init_cfg) 27 | self.fp16_enabled = False 28 | self.mlps = nn.Sequential() 29 | for i in range(len(mlp_channels) - 1): 30 | self.mlps.add_module( 31 | f'layer{i}', 32 | ConvModule( 33 | mlp_channels[i], 34 | mlp_channels[i + 1], 35 | kernel_size=(1, ), 36 | stride=(1, ), 37 | conv_cfg=dict(type='Conv1d'), 38 | norm_cfg=norm_cfg, 39 | act_cfg=act_cfg)) 40 | 41 | @force_fp32() 42 | def forward(self, points): 43 | """forward. 44 | 45 | Args: 46 | points (Tensor): (B, N, C) tensor of the input points. 47 | 48 | Returns: 49 | Tensor: (B, N, M) M = mlp[-1], tensor of the new points. 50 | """ 51 | 52 | if points is not None: 53 | new_points = points.transpose(1, 2).contiguous() # (B, C, N) 54 | new_points = self.mlps(new_points) 55 | new_points = new_points.transpose(1, 2).contiguous() 56 | else: 57 | new_points = points 58 | 59 | return new_points 60 | -------------------------------------------------------------------------------- /mmdet3d/datasets/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import platform 3 | 4 | from mmcv.utils import Registry, build_from_cfg 5 | 6 | from mmdet.datasets import DATASETS as MMDET_DATASETS 7 | from mmdet.datasets.builder import _concat_dataset 8 | 9 | if platform.system() != 'Windows': 10 | # https://github.com/pytorch/pytorch/issues/973 11 | import resource 12 | rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) 13 | base_soft_limit = rlimit[0] 14 | hard_limit = rlimit[1] 15 | soft_limit = min(max(4096, base_soft_limit), hard_limit) 16 | resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) 17 | 18 | OBJECTSAMPLERS = Registry('Object sampler') 19 | DATASETS = Registry('dataset') 20 | PIPELINES = Registry('pipeline') 21 | 22 | 23 | def build_dataset(cfg, default_args=None): 24 | from mmdet3d.datasets.dataset_wrappers import CBGSDataset 25 | from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset, 26 | ConcatDataset, RepeatDataset) 27 | if isinstance(cfg, (list, tuple)): 28 | dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) 29 | elif cfg['type'] == 'ConcatDataset': 30 | dataset = ConcatDataset( 31 | [build_dataset(c, default_args) for c in cfg['datasets']], 32 | cfg.get('separate_eval', True)) 33 | elif cfg['type'] == 'RepeatDataset': 34 | dataset = RepeatDataset( 35 | build_dataset(cfg['dataset'], default_args), cfg['times']) 36 | elif cfg['type'] == 'ClassBalancedDataset': 37 | dataset = ClassBalancedDataset( 38 | build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) 39 | elif cfg['type'] == 'CBGSDataset': 40 | dataset = CBGSDataset(build_dataset(cfg['dataset'], default_args)) 41 | elif isinstance(cfg.get('ann_file'), (list, tuple)): 42 | dataset = _concat_dataset(cfg, default_args) 43 | elif cfg['type'] in DATASETS._module_dict.keys(): 44 | dataset = build_from_cfg(cfg, DATASETS, default_args) 45 | else: 46 | dataset = build_from_cfg(cfg, MMDET_DATASETS, default_args) 47 | return dataset 48 | -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/roi_extractors/single_roiaware_extractor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv import ops 4 | from mmcv.runner import BaseModule 5 | 6 | from mmdet3d.models.builder import ROI_EXTRACTORS 7 | 8 | 9 | @ROI_EXTRACTORS.register_module() 10 | class Single3DRoIAwareExtractor(BaseModule): 11 | """Point-wise roi-aware Extractor. 12 | 13 | Extract Point-wise roi features. 14 | 15 | Args: 16 | roi_layer (dict): The config of roi layer. 17 | """ 18 | 19 | def __init__(self, roi_layer=None, init_cfg=None): 20 | super(Single3DRoIAwareExtractor, self).__init__(init_cfg=init_cfg) 21 | self.roi_layer = self.build_roi_layers(roi_layer) 22 | 23 | def build_roi_layers(self, layer_cfg): 24 | """Build roi layers using `layer_cfg`""" 25 | cfg = layer_cfg.copy() 26 | layer_type = cfg.pop('type') 27 | assert hasattr(ops, layer_type) 28 | layer_cls = getattr(ops, layer_type) 29 | roi_layers = layer_cls(**cfg) 30 | return roi_layers 31 | 32 | def forward(self, feats, coordinate, batch_inds, rois): 33 | """Extract point-wise roi features. 34 | 35 | Args: 36 | feats (torch.FloatTensor): Point-wise features with 37 | shape (batch, npoints, channels) for pooling. 38 | coordinate (torch.FloatTensor): Coordinate of each point. 39 | batch_inds (torch.LongTensor): Indicate the batch of each point. 40 | rois (torch.FloatTensor): Roi boxes with batch indices. 
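Expected shape is (num_rois, 1 + box_dim); the first column stores the sample index that is matched against ``batch_inds`` below.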
41 | 42 | Returns: 43 | torch.FloatTensor: Pooled features 44 | """ 45 | pooled_roi_feats = [] 46 | for batch_idx in range(int(batch_inds.max()) + 1): 47 | roi_inds = (rois[..., 0].int() == batch_idx) 48 | coors_inds = (batch_inds.int() == batch_idx) 49 | pooled_roi_feat = self.roi_layer(rois[..., 1:][roi_inds], 50 | coordinate[coors_inds], 51 | feats[coors_inds]) 52 | pooled_roi_feats.append(pooled_roi_feat) 53 | pooled_roi_feats = torch.cat(pooled_roi_feats, 0) 54 | return pooled_roi_feats 55 | -------------------------------------------------------------------------------- /mmdet3d/datasets/pipelines/compose.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import collections 3 | 4 | from mmcv.utils import build_from_cfg 5 | 6 | from mmdet.datasets.builder import PIPELINES as MMDET_PIPELINES 7 | from ..builder import PIPELINES 8 | 9 | 10 | @PIPELINES.register_module() 11 | class Compose: 12 | """Compose multiple transforms sequentially. The pipeline registry of 13 | mmdet3d separates with mmdet, however, sometimes we may need to use mmdet's 14 | pipeline. So the class is rewritten to be able to use pipelines from both 15 | mmdet3d and mmdet. 16 | 17 | Args: 18 | transforms (Sequence[dict | callable]): Sequence of transform object or 19 | config dict to be composed. 20 | """ 21 | 22 | def __init__(self, transforms): 23 | assert isinstance(transforms, collections.abc.Sequence) 24 | self.transforms = [] 25 | for transform in transforms: 26 | if isinstance(transform, dict): 27 | _, key = PIPELINES.split_scope_key(transform['type']) 28 | if key in PIPELINES._module_dict.keys(): 29 | transform = build_from_cfg(transform, PIPELINES) 30 | else: 31 | transform = build_from_cfg(transform, MMDET_PIPELINES) 32 | self.transforms.append(transform) 33 | elif callable(transform): 34 | self.transforms.append(transform) 35 | else: 36 | raise TypeError('transform must be callable or a dict') 37 | 38 | def __call__(self, data): 39 | """Call function to apply transforms sequentially. 40 | 41 | Args: 42 | data (dict): A result dict contains the data to transform. 43 | 44 | Returns: 45 | dict: Transformed data. 46 | """ 47 | 48 | for t in self.transforms: 49 | data = t(data) 50 | if data is None: 51 | return None 52 | return data 53 | 54 | def __repr__(self): 55 | format_string = self.__class__.__name__ + '(' 56 | for t in self.transforms: 57 | format_string += '\n' 58 | format_string += f' {t}' 59 | format_string += '\n)' 60 | return format_string 61 | -------------------------------------------------------------------------------- /mmdet3d/models/decode_heads/dgcnn_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn.bricks import ConvModule 3 | 4 | from mmdet3d.ops import DGCNNFPModule 5 | from ..builder import HEADS 6 | from .decode_head import Base3DDecodeHead 7 | 8 | 9 | @HEADS.register_module() 10 | class DGCNNHead(Base3DDecodeHead): 11 | r"""DGCNN decoder head. 12 | 13 | Decoder head used in `DGCNN `_. 14 | Refer to the 15 | `reimplementation code `_. 16 | 17 | Args: 18 | fp_channels (tuple[int], optional): Tuple of mlp channels in feature 19 | propagation (FP) modules. Defaults to (1216, 512). 
20 | """ 21 | 22 | def __init__(self, fp_channels=(1216, 512), **kwargs): 23 | super(DGCNNHead, self).__init__(**kwargs) 24 | 25 | self.FP_module = DGCNNFPModule( 26 | mlp_channels=fp_channels, act_cfg=self.act_cfg) 27 | 28 | # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L40 29 | self.pre_seg_conv = ConvModule( 30 | fp_channels[-1], 31 | self.channels, 32 | kernel_size=1, 33 | bias=False, 34 | conv_cfg=self.conv_cfg, 35 | norm_cfg=self.norm_cfg, 36 | act_cfg=self.act_cfg) 37 | 38 | def _extract_input(self, feat_dict): 39 | """Extract inputs from features dictionary. 40 | 41 | Args: 42 | feat_dict (dict): Feature dict from backbone. 43 | 44 | Returns: 45 | torch.Tensor: points for decoder. 46 | """ 47 | fa_points = feat_dict['fa_points'] 48 | 49 | return fa_points 50 | 51 | def forward(self, feat_dict): 52 | """Forward pass. 53 | 54 | Args: 55 | feat_dict (dict): Feature dict from backbone. 56 | 57 | Returns: 58 | torch.Tensor: Segmentation map of shape [B, num_classes, N]. 59 | """ 60 | fa_points = self._extract_input(feat_dict) 61 | 62 | fp_points = self.FP_module(fa_points) 63 | fp_points = fp_points.transpose(1, 2).contiguous() 64 | output = self.pre_seg_conv(fp_points) 65 | output = self.cls_seg(output) 66 | 67 | return output 68 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/mvx_faster_rcnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.runner import force_fp32 4 | from torch.nn import functional as F 5 | 6 | from ..builder import DETECTORS 7 | from .mvx_two_stage import MVXTwoStageDetector 8 | 9 | 10 | @DETECTORS.register_module() 11 | class MVXFasterRCNN(MVXTwoStageDetector): 12 | """Multi-modality VoxelNet using Faster R-CNN.""" 13 | 14 | def __init__(self, **kwargs): 15 | super(MVXFasterRCNN, self).__init__(**kwargs) 16 | 17 | 18 | @DETECTORS.register_module() 19 | class DynamicMVXFasterRCNN(MVXTwoStageDetector): 20 | """Multi-modality VoxelNet using Faster R-CNN and dynamic voxelization.""" 21 | 22 | def __init__(self, **kwargs): 23 | super(DynamicMVXFasterRCNN, self).__init__(**kwargs) 24 | 25 | @torch.no_grad() 26 | @force_fp32() 27 | def voxelize(self, points): 28 | """Apply dynamic voxelization to points. 29 | 30 | Args: 31 | points (list[torch.Tensor]): Points of each sample. 32 | 33 | Returns: 34 | tuple[torch.Tensor]: Concatenated points and coordinates. 
35 | """ 36 | coors = [] 37 | # dynamic voxelization only provide a coors mapping 38 | for res in points: 39 | res_coors = self.pts_voxel_layer(res) 40 | coors.append(res_coors) 41 | points = torch.cat(points, dim=0) 42 | coors_batch = [] 43 | for i, coor in enumerate(coors): 44 | coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) 45 | coors_batch.append(coor_pad) 46 | coors_batch = torch.cat(coors_batch, dim=0) 47 | return points, coors_batch 48 | 49 | def extract_pts_feat(self, points, img_feats, img_metas): 50 | """Extract point features.""" 51 | if not self.with_pts_bbox: 52 | return None 53 | voxels, coors = self.voxelize(points) 54 | voxel_features, feature_coors = self.pts_voxel_encoder( 55 | voxels, coors, points, img_feats, img_metas) 56 | batch_size = coors[-1, 0] + 1 57 | x = self.pts_middle_encoder(voxel_features, feature_coors, batch_size) 58 | x = self.pts_backbone(x) 59 | if self.with_pts_neck: 60 | x = self.pts_neck(x) 61 | return x 62 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/environment.yml: -------------------------------------------------------------------------------- 1 | name: DMRDenoise 2 | channels: 3 | - pytorch 4 | - conda-forge 5 | - defaults 6 | dependencies: 7 | - _libgcc_mutex=0.1=main 8 | - absl-py=0.9.0=py36_0 9 | - blas=1.0=mkl 10 | - c-ares=1.16.1=h516909a_0 11 | - ca-certificates=2020.6.20=hecda079_0 12 | - certifi=2020.6.20=py36h9f0ad1d_0 13 | - cudatoolkit=9.2=0 14 | - freetype=2.10.2=h5ab3b9f_0 15 | - future=0.18.2=py36h9f0ad1d_1 16 | - grpcio=1.30.0=py36h769ab6c_0 17 | - h5py=2.10.0=nompi_py36hecadee3_104 18 | - hdf5=1.10.6=nompi_h3c11f04_100 19 | - importlib-metadata=1.7.0=py36h9f0ad1d_0 20 | - intel-openmp=2020.1=217 21 | - joblib=0.16.0=py_0 22 | - jpeg=9b=h024ee3a_2 23 | - lcms2=2.11=h396b838_0 24 | - ld_impl_linux-64=2.33.1=h53a641e_7 25 | - libedit=3.1.20191231=h14c3975_1 26 | - libffi=3.3=he6710b0_2 27 | - libgcc-ng=9.1.0=hdf63c60_0 28 | - libgfortran-ng=7.3.0=hdf63c60_0 29 | - libpng=1.6.37=hbc83047_0 30 | - libprotobuf=3.12.3=h8b12597_2 31 | - libstdcxx-ng=9.1.0=hdf63c60_0 32 | - libtiff=4.1.0=h2733197_1 33 | - lz4-c=1.9.2=he6710b0_0 34 | - markdown=3.2.2=py_0 35 | - mkl=2020.1=217 36 | - mkl-service=2.3.0=py36he904b0f_0 37 | - mkl_fft=1.1.0=py36h23d657b_0 38 | - mkl_random=1.1.1=py36h0573a6f_0 39 | - ncurses=6.2=he6710b0_1 40 | - ninja=1.9.0=py36hfd86e86_0 41 | - numpy=1.18.5=py36ha1c710e_0 42 | - numpy-base=1.18.5=py36hde5b4d6_0 43 | - olefile=0.46=py36_0 44 | - openssl=1.1.1g=h516909a_0 45 | - pillow=7.2.0=py36hb39fc2d_0 46 | - pip=20.1.1=py36_1 47 | - protobuf=3.12.3=py36h831f99a_0 48 | - python=3.6.10=h7579374_2 49 | - python_abi=3.6=1_cp36m 50 | - pytorch=1.5.1=py3.6_cuda9.2.148_cudnn7.6.3_0 51 | - pytorch-lightning=0.7.6=py_0 52 | - pyyaml=5.3.1=py36h8c4c3a4_0 53 | - readline=8.0=h7b6447c_0 54 | - scikit-learn=0.23.1=py36h423224d_0 55 | - scipy=1.5.0=py36h0b6359f_0 56 | - setuptools=49.2.0=py36_0 57 | - six=1.15.0=py_0 58 | - sqlite=3.32.3=h62c20be_0 59 | - tensorboard=1.15.0=py36_0 60 | - threadpoolctl=2.1.0=pyh5ca1d4c_0 61 | - tk=8.6.10=hbc83047_0 62 | - torchvision=0.6.1=py36_cu92 63 | - tqdm=4.48.0=pyh9f0ad1d_0 64 | - werkzeug=1.0.1=pyh9f0ad1d_0 65 | - wheel=0.34.2=py36_0 66 | - xz=5.2.5=h7b6447c_0 67 | - yaml=0.2.5=h516909a_0 68 | - zipp=3.1.0=py_0 69 | - zlib=1.2.11=h7b6447c_3 70 | - zstd=1.4.5=h0b5b093_0 71 | 72 | -------------------------------------------------------------------------------- /model/model_ensemblor/model_ensemble.py: 
-------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | import torch 4 | from ensemble_utils import box_torch_ops 5 | 6 | def model_ensemble(model_results_list, method_name): 7 | """ 8 | Ensemble the predictions of different models to get better results. 9 | 10 | Args: 11 | model_results_list: list. Paths of the pickled prediction files of the different models. 12 | method_name: str. Ensemble method name. 13 | return: 14 | ensembled predictions, one dict per sample. 15 | 16 | """ 17 | model_results_data = [] 18 | for path in model_results_list: 19 | with open(path, 'rb') as fo: 20 | model_results_data.append(pickle.load(fo, encoding='bytes')) 21 | 22 | label = list(model_results_data[0].keys()) 23 | prediction_dicts = [] 24 | final_result = {} 25 | 26 | for i in range(len(label)): 27 | # gather the predictions of every model for the current sample 28 | box_preds = [] 29 | scores = [] 30 | labels = [] 31 | for model_result_data in model_results_data: 32 | model_result_data_ele = model_result_data[label[i]] 33 | box_preds.append(model_result_data_ele['box3d_lidar']) 34 | scores.append(model_result_data_ele['scores']) 35 | labels.append(model_result_data_ele['label_preds']) 36 | 37 | box_preds = torch.cat(box_preds).to('cuda:0') 38 | scores = torch.cat(scores).to('cuda:0') 39 | labels = torch.cat(labels).to('cuda:0') 40 | 41 | if method_name == 'nms': 42 | boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]] 43 | 44 | selected = box_torch_ops.rotate_nms_pcdet(boxes_for_nms.float(), scores.float(), 45 | thresh=0.2, 46 | pre_maxsize=1000, 47 | post_max_size=83) 48 | 49 | selected_boxes = box_preds[selected] 50 | selected_scores = scores[selected] 51 | selected_labels = labels[selected] 52 | 53 | prediction_dict = { 54 | 'box3d_lidar': selected_boxes, 55 | 'scores': selected_scores, 56 | 'label_preds': selected_labels, 57 | 'metadata': model_results_data[0][label[i]]['metadata'] 58 | } 59 | 60 | final_result[label[i]] = prediction_dict 61 | prediction_dicts.append(prediction_dict) 62 | 63 | return prediction_dicts 64 | 
-------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/ops/emd/README.md: -------------------------------------------------------------------------------- 1 | This code was taken from [https://github.com/Colin97/MSN-Point-Cloud-Completion/tree/master/emd](https://github.com/Colin97/MSN-Point-Cloud-Completion/tree/master/emd). It is a part of the implementation of the following paper: 2 | 3 | ``` 4 | @article{liu2019morphing, 5 | title={Morphing and Sampling Network for Dense Point Cloud Completion}, 6 | author={Liu, Minghua and Sheng, Lu and Yang, Sheng and Shao, Jing and Hu, Shi-Min}, 7 | journal={arXiv preprint arXiv:1912.00280}, 8 | year={2019} 9 | } 10 | ``` 11 | 12 | ## Earth Mover's Distance of point clouds 13 | 14 | Compared to the Chamfer Distance (CD), the Earth Mover's Distance (EMD) is more reliable to distinguish the visual quality of the point clouds. See our [paper](http://cseweb.ucsd.edu/~mil070/projects/AAAI2020/paper.pdf) for more details. 15 | 16 | We provide an EMD implementation for point cloud comparison, which only needs $O(n)$ memory and thus enables dense point clouds (with 10,000 points or over) and large batch size. It is based on an approximated algorithm (auction algorithm) and cannot guarantee a (but near) bijection assignment. It employs a parameter $\epsilon$ to balance the error rate and the speed of convergence. Smaller $\epsilon$ achieves more accurate results, but needs a longer time for convergence.
The time complexity is $O(n^2k)$, where $k$ is the number of iterations. We set a $\epsilon = 0.005, k = 50$ during training and a $\epsilon = 0.002, k = 10000$ during testing. 17 | 18 | ### Compile 19 | Run `python3 setup.py install` to compile. 20 | 21 | ### Example 22 | See `emd_module.py/test_emd()` for examples. 23 | 24 | ### Input 25 | 26 | - **xyz1, xyz2**: float tensors with shape `[#batch, #points, 3]`. xyz1 is the predicted point cloud and xyz2 is the ground truth point cloud. Two point clouds should have same size and be normalized to [0, 1]. The number of points should be a multiple of 1024. The batch size should be no greater than 512. Since we only calculate gradients for xyz1, please do not swap xyz1 and xyz2. 27 | - **eps**: a float tensor, the parameter balances the error rate and the speed of convergence. 28 | - **iters**: a int tensor, the number of iterations. 29 | 30 | ### Output 31 | 32 | - **dist**: a float tensor with shape `[#batch, #points]`. sqrt(dist) are the L2 distances between the pairs of points. 33 | - **assignment**: a int tensor with shape `[#batch, #points]`. The index of the matched point in the ground truth point cloud. 34 | -------------------------------------------------------------------------------- /mmdet3d/models/decode_heads/paconv_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn.bricks import ConvModule 3 | 4 | from ..builder import HEADS 5 | from .pointnet2_head import PointNet2Head 6 | 7 | 8 | @HEADS.register_module() 9 | class PAConvHead(PointNet2Head): 10 | r"""PAConv decoder head. 11 | 12 | Decoder head used in `PAConv `_. 13 | Refer to the `official code `_. 14 | 15 | Args: 16 | fp_channels (tuple[tuple[int]]): Tuple of mlp channels in FP modules. 17 | fp_norm_cfg (dict): Config of norm layers used in FP modules. 18 | Default: dict(type='BN2d'). 19 | """ 20 | 21 | def __init__(self, 22 | fp_channels=((768, 256, 256), (384, 256, 256), 23 | (320, 256, 128), (128 + 6, 128, 128, 128)), 24 | fp_norm_cfg=dict(type='BN2d'), 25 | **kwargs): 26 | super(PAConvHead, self).__init__(fp_channels, fp_norm_cfg, **kwargs) 27 | 28 | # https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/pointnet2/pointnet2_paconv_seg.py#L53 29 | # PointNet++'s decoder conv has bias while PAConv's doesn't have 30 | # so we need to rebuild it here 31 | self.pre_seg_conv = ConvModule( 32 | fp_channels[-1][-1], 33 | self.channels, 34 | kernel_size=1, 35 | bias=False, 36 | conv_cfg=self.conv_cfg, 37 | norm_cfg=self.norm_cfg, 38 | act_cfg=self.act_cfg) 39 | 40 | def forward(self, feat_dict): 41 | """Forward pass. 42 | 43 | Args: 44 | feat_dict (dict): Feature dict from backbone. 45 | 46 | Returns: 47 | torch.Tensor: Segmentation map of shape [B, num_classes, N]. 
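Note: ``feat_dict`` is expected to carry the stage-wise ``sa_xyz`` and ``sa_features`` produced by the backbone, which ``_extract_input`` unpacks below.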
48 | """ 49 | sa_xyz, sa_features = self._extract_input(feat_dict) 50 | 51 | # PointNet++ doesn't use the first level of `sa_features` as input 52 | # while PAConv inputs it through skip-connection 53 | fp_feature = sa_features[-1] 54 | 55 | for i in range(self.num_fp): 56 | # consume the points in a bottom-up manner 57 | fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], 58 | sa_features[-(i + 2)], fp_feature) 59 | 60 | output = self.pre_seg_conv(fp_feature) 61 | output = self.cls_seg(output) 62 | 63 | return output 64 | -------------------------------------------------------------------------------- /mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.cnn import ConvModule 4 | from mmcv.runner import BaseModule, force_fp32 5 | from torch import nn as nn 6 | 7 | 8 | class DGCNNFAModule(BaseModule): 9 | """Point feature aggregation module used in DGCNN. 10 | 11 | Aggregate all the features of points. 12 | 13 | Args: 14 | mlp_channels (list[int]): List of mlp channels. 15 | norm_cfg (dict, optional): Type of normalization method. 16 | Defaults to dict(type='BN1d'). 17 | act_cfg (dict, optional): Type of activation method. 18 | Defaults to dict(type='ReLU'). 19 | init_cfg (dict, optional): Initialization config. Defaults to None. 20 | """ 21 | 22 | def __init__(self, 23 | mlp_channels, 24 | norm_cfg=dict(type='BN1d'), 25 | act_cfg=dict(type='ReLU'), 26 | init_cfg=None): 27 | super().__init__(init_cfg=init_cfg) 28 | self.fp16_enabled = False 29 | self.mlps = nn.Sequential() 30 | for i in range(len(mlp_channels) - 1): 31 | self.mlps.add_module( 32 | f'layer{i}', 33 | ConvModule( 34 | mlp_channels[i], 35 | mlp_channels[i + 1], 36 | kernel_size=(1, ), 37 | stride=(1, ), 38 | conv_cfg=dict(type='Conv1d'), 39 | norm_cfg=norm_cfg, 40 | act_cfg=act_cfg)) 41 | 42 | @force_fp32() 43 | def forward(self, points): 44 | """forward. 45 | 46 | Args: 47 | points (List[Tensor]): tensor of the features to be aggregated. 48 | 49 | Returns: 50 | Tensor: (B, N, M) M = mlp[-1], tensor of the output points. 51 | """ 52 | 53 | if len(points) > 1: 54 | new_points = torch.cat(points[1:], dim=-1) 55 | new_points = new_points.transpose(1, 2).contiguous() # (B, C, N) 56 | new_points_copy = new_points 57 | 58 | new_points = self.mlps(new_points) 59 | 60 | new_fa_points = new_points.max(dim=-1, keepdim=True)[0] 61 | new_fa_points = new_fa_points.repeat(1, 1, new_points.shape[-1]) 62 | 63 | new_points = torch.cat([new_fa_points, new_points_copy], dim=1) 64 | new_points = new_points.transpose(1, 2).contiguous() 65 | else: 66 | new_points = points 67 | 68 | return new_points 69 | -------------------------------------------------------------------------------- /mmdet3d/core/points/depth_points.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base_points import BasePoints 3 | 4 | 5 | class DepthPoints(BasePoints): 6 | """Points of instances in DEPTH coordinates. 7 | 8 | Args: 9 | tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. 10 | points_dim (int, optional): Number of the dimension of a point. 11 | Each row is (x, y, z). Defaults to 3. 12 | attribute_dims (dict, optional): Dictionary to indicate the 13 | meaning of extra dimension. Defaults to None. 14 | 15 | Attributes: 16 | tensor (torch.Tensor): Float matrix of N x points_dim. 
17 | points_dim (int): Integer indicating the dimension of a point. 18 | Each row is (x, y, z, ...). 19 | attribute_dims (bool): Dictionary to indicate the meaning of extra 20 | dimension. Defaults to None. 21 | rotation_axis (int): Default rotation axis for points rotation. 22 | """ 23 | 24 | def __init__(self, tensor, points_dim=3, attribute_dims=None): 25 | super(DepthPoints, self).__init__( 26 | tensor, points_dim=points_dim, attribute_dims=attribute_dims) 27 | self.rotation_axis = 2 28 | 29 | def flip(self, bev_direction='horizontal'): 30 | """Flip the points along given BEV direction. 31 | 32 | Args: 33 | bev_direction (str): Flip direction (horizontal or vertical). 34 | """ 35 | if bev_direction == 'horizontal': 36 | self.tensor[:, 0] = -self.tensor[:, 0] 37 | elif bev_direction == 'vertical': 38 | self.tensor[:, 1] = -self.tensor[:, 1] 39 | 40 | def convert_to(self, dst, rt_mat=None): 41 | """Convert self to ``dst`` mode. 42 | 43 | Args: 44 | dst (:obj:`CoordMode`): The target Point mode. 45 | rt_mat (np.ndarray | torch.Tensor, optional): The rotation and 46 | translation matrix between different coordinates. 47 | Defaults to None. 48 | The conversion from `src` coordinates to `dst` coordinates 49 | usually comes along the change of sensors, e.g., from camera 50 | to LiDAR. This requires a transformation matrix. 51 | 52 | Returns: 53 | :obj:`BasePoints`: The converted point of the same type 54 | in the `dst` mode. 55 | """ 56 | from mmdet3d.core.bbox import Coord3DMode 57 | return Coord3DMode.convert_point( 58 | point=self, src=Coord3DMode.DEPTH, dst=dst, rt_mat=rt_mat) 59 | -------------------------------------------------------------------------------- /mmdet3d/core/points/lidar_points.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base_points import BasePoints 3 | 4 | 5 | class LiDARPoints(BasePoints): 6 | """Points of instances in LIDAR coordinates. 7 | 8 | Args: 9 | tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. 10 | points_dim (int, optional): Number of the dimension of a point. 11 | Each row is (x, y, z). Defaults to 3. 12 | attribute_dims (dict, optional): Dictionary to indicate the 13 | meaning of extra dimension. Defaults to None. 14 | 15 | Attributes: 16 | tensor (torch.Tensor): Float matrix of N x points_dim. 17 | points_dim (int): Integer indicating the dimension of a point. 18 | Each row is (x, y, z, ...). 19 | attribute_dims (bool): Dictionary to indicate the meaning of extra 20 | dimension. Defaults to None. 21 | rotation_axis (int): Default rotation axis for points rotation. 22 | """ 23 | 24 | def __init__(self, tensor, points_dim=3, attribute_dims=None): 25 | super(LiDARPoints, self).__init__( 26 | tensor, points_dim=points_dim, attribute_dims=attribute_dims) 27 | self.rotation_axis = 2 28 | 29 | def flip(self, bev_direction='horizontal'): 30 | """Flip the points along given BEV direction. 31 | 32 | Args: 33 | bev_direction (str): Flip direction (horizontal or vertical). 34 | """ 35 | if bev_direction == 'horizontal': 36 | self.tensor[:, 1] = -self.tensor[:, 1] 37 | elif bev_direction == 'vertical': 38 | self.tensor[:, 0] = -self.tensor[:, 0] 39 | 40 | def convert_to(self, dst, rt_mat=None): 41 | """Convert self to ``dst`` mode. 42 | 43 | Args: 44 | dst (:obj:`CoordMode`): The target Point mode. 45 | rt_mat (np.ndarray | torch.Tensor, optional): The rotation and 46 | translation matrix between different coordinates. 47 | Defaults to None. 
48 | The conversion from `src` coordinates to `dst` coordinates 49 | usually comes along the change of sensors, e.g., from camera 50 | to LiDAR. This requires a transformation matrix. 51 | 52 | Returns: 53 | :obj:`BasePoints`: The converted point of the same type 54 | in the `dst` mode. 55 | """ 56 | from mmdet3d.core.bbox import Coord3DMode 57 | return Coord3DMode.convert_point( 58 | point=self, src=Coord3DMode.LIDAR, dst=dst, rt_mat=rt_mat) 59 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/models/net.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import Module, Sequential, ModuleList 3 | 4 | from .blocks import * 5 | from .utils import * 6 | from .loss import * 7 | 8 | 9 | class DenoiseNet(Module): 10 | 11 | def __init__( 12 | self, 13 | loss_rec='emd', 14 | loss_ds='cd', 15 | activation='relu', 16 | dynamic_graph=True, 17 | conv_knns=[8, 16, 24], 18 | conv_channels=32, 19 | conv_layer_out_dim=24, 20 | gpool_use_mlp=False, 21 | use_random_mesh=False, 22 | use_random_pool=False, 23 | no_prefilter=False, 24 | ): 25 | super().__init__() 26 | self.feats = ModuleList() 27 | self.feat_dim = 0 28 | for knn in conv_knns: 29 | feat_unit = FeatureExtraction(dynamic_graph=dynamic_graph, conv_knn=knn, conv_channels=conv_channels, conv_layer_out_dim=conv_layer_out_dim, activation=activation) 30 | self.feats.append(feat_unit) 31 | self.feat_dim += feat_unit.out_channels 32 | 33 | self.downsample = DownsampleAdjust(feature_dim=self.feat_dim, ratio=0.5, use_mlp=gpool_use_mlp, activation=activation, random_pool=use_random_pool, pre_filter=not no_prefilter) 34 | 35 | if use_random_mesh: 36 | self.upsample = Upsampling(feature_dim=self.feat_dim, mesh_dim=2, mesh_steps=2, use_random_mesh=True, activation=activation) 37 | else: 38 | self.upsample = Upsampling(feature_dim=self.feat_dim, mesh_dim=1, mesh_steps=2, use_random_mesh=False, activation=activation) 39 | 40 | self.loss_ds = get_loss_layer(loss_ds) 41 | self.loss_rec = get_loss_layer(loss_rec) 42 | 43 | # print('Loss: ') 44 | # print(self.loss_ds) 45 | # print(self.loss_rec) 46 | 47 | self.epoch = 0 48 | 49 | def forward(self, pos): 50 | self.epoch += 1 51 | 52 | feats = [] 53 | for feat_unit in self.feats: 54 | feats.append(feat_unit(pos)) 55 | feat = torch.cat(feats, dim=-1) 56 | 57 | idx, pos, feat = self.downsample(pos, feat) 58 | self.adjusted = pos 59 | pos = self.upsample(pos, feat) 60 | self.preds = pos 61 | return pos 62 | 63 | def get_loss(self, gts, normals, inputs, **kwargs): 64 | loss = self.loss_rec(preds=self.preds, gts=gts, normals=normals, inputs=inputs, epoch=self.epoch) 65 | if self.loss_ds is not None: 66 | loss = loss + self.loss_ds(preds=self.preds, gts=gts, normals=normals, inputs=inputs, epoch=self.epoch) 67 | return loss 68 | 69 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/dynamic_voxelnet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.runner import force_fp32 4 | from torch.nn import functional as F 5 | 6 | from ..builder import DETECTORS 7 | from .voxelnet import VoxelNet 8 | 9 | 10 | @DETECTORS.register_module() 11 | class DynamicVoxelNet(VoxelNet): 12 | r"""VoxelNet using `dynamic voxelization `_. 
13 | """ 14 | 15 | def __init__(self, 16 | voxel_layer, 17 | voxel_encoder, 18 | middle_encoder, 19 | backbone, 20 | neck=None, 21 | bbox_head=None, 22 | train_cfg=None, 23 | test_cfg=None, 24 | pretrained=None, 25 | init_cfg=None): 26 | super(DynamicVoxelNet, self).__init__( 27 | voxel_layer=voxel_layer, 28 | voxel_encoder=voxel_encoder, 29 | middle_encoder=middle_encoder, 30 | backbone=backbone, 31 | neck=neck, 32 | bbox_head=bbox_head, 33 | train_cfg=train_cfg, 34 | test_cfg=test_cfg, 35 | pretrained=pretrained, 36 | init_cfg=init_cfg) 37 | 38 | def extract_feat(self, points, img_metas): 39 | """Extract features from points.""" 40 | voxels, coors = self.voxelize(points) 41 | voxel_features, feature_coors = self.voxel_encoder(voxels, coors) 42 | batch_size = coors[-1, 0].item() + 1 43 | x = self.middle_encoder(voxel_features, feature_coors, batch_size) 44 | x = self.backbone(x) 45 | if self.with_neck: 46 | x = self.neck(x) 47 | return x 48 | 49 | @torch.no_grad() 50 | @force_fp32() 51 | def voxelize(self, points): 52 | """Apply dynamic voxelization to points. 53 | 54 | Args: 55 | points (list[torch.Tensor]): Points of each sample. 56 | 57 | Returns: 58 | tuple[torch.Tensor]: Concatenated points and coordinates. 59 | """ 60 | coors = [] 61 | # dynamic voxelization only provide a coors mapping 62 | for res in points: 63 | res_coors = self.voxel_layer(res) 64 | coors.append(res_coors) 65 | points = torch.cat(points, dim=0) 66 | coors_batch = [] 67 | for i, coor in enumerate(coors): 68 | coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) 69 | coors_batch.append(coor_pad) 70 | coors_batch = torch.cat(coors_batch, dim=0) 71 | return points, coors_batch 72 | -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/roi_extractors/single_roipoint_extractor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv import ops 4 | from torch import nn as nn 5 | 6 | from mmdet3d.core.bbox.structures import rotation_3d_in_axis 7 | from mmdet3d.models.builder import ROI_EXTRACTORS 8 | 9 | 10 | @ROI_EXTRACTORS.register_module() 11 | class Single3DRoIPointExtractor(nn.Module): 12 | """Point-wise roi-aware Extractor. 13 | 14 | Extract Point-wise roi features. 15 | 16 | Args: 17 | roi_layer (dict): The config of roi layer. 18 | """ 19 | 20 | def __init__(self, roi_layer=None): 21 | super(Single3DRoIPointExtractor, self).__init__() 22 | self.roi_layer = self.build_roi_layers(roi_layer) 23 | 24 | def build_roi_layers(self, layer_cfg): 25 | """Build roi layers using `layer_cfg`""" 26 | cfg = layer_cfg.copy() 27 | layer_type = cfg.pop('type') 28 | assert hasattr(ops, layer_type) 29 | layer_cls = getattr(ops, layer_type) 30 | roi_layers = layer_cls(**cfg) 31 | return roi_layers 32 | 33 | def forward(self, feats, coordinate, batch_inds, rois): 34 | """Extract point-wise roi features. 35 | 36 | Args: 37 | feats (torch.FloatTensor): Point-wise features with 38 | shape (batch, npoints, channels) for pooling. 39 | coordinate (torch.FloatTensor): Coordinate of each point. 40 | batch_inds (torch.LongTensor): Indicate the batch of each point. 41 | rois (torch.FloatTensor): Roi boxes with batch indices. 
42 | 43 | Returns: 44 | torch.FloatTensor: Pooled features 45 | """ 46 | rois = rois[..., 1:] 47 | rois = rois.view(batch_inds, -1, rois.shape[-1]) 48 | with torch.no_grad(): 49 | pooled_roi_feat, pooled_empty_flag = self.roi_layer( 50 | coordinate, feats, rois) 51 | 52 | # canonical transformation 53 | roi_center = rois[:, :, 0:3] 54 | pooled_roi_feat[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2) 55 | pooled_roi_feat = pooled_roi_feat.view(-1, 56 | pooled_roi_feat.shape[-2], 57 | pooled_roi_feat.shape[-1]) 58 | pooled_roi_feat[:, :, 0:3] = rotation_3d_in_axis( 59 | pooled_roi_feat[:, :, 0:3], 60 | -(rois.view(-1, rois.shape[-1])[:, 6]), 61 | axis=2) 62 | pooled_roi_feat[pooled_empty_flag.view(-1) > 0] = 0 63 | 64 | return pooled_roi_feat 65 | -------------------------------------------------------------------------------- /mmdet3d/utils/setup_env.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import os 3 | import platform 4 | import warnings 5 | 6 | import cv2 7 | from torch import multiprocessing as mp 8 | 9 | 10 | def setup_multi_processes(cfg): 11 | """Setup multi-processing environment variables.""" 12 | # set multi-process start method as `fork` to speed up the training 13 | if platform.system() != 'Windows': 14 | mp_start_method = cfg.get('mp_start_method', 'fork') 15 | current_method = mp.get_start_method(allow_none=True) 16 | if current_method is not None and current_method != mp_start_method: 17 | warnings.warn( 18 | f'Multi-processing start method `{mp_start_method}` is ' 19 | f'different from the previous setting `{current_method}`.' 20 | f'It will be force set to `{mp_start_method}`. You can change ' 21 | f'this behavior by changing `mp_start_method` in your config.') 22 | mp.set_start_method(mp_start_method, force=True) 23 | 24 | # disable opencv multithreading to avoid system being overloaded 25 | opencv_num_threads = cfg.get('opencv_num_threads', 0) 26 | cv2.setNumThreads(opencv_num_threads) 27 | 28 | # setup OMP threads 29 | # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa 30 | workers_per_gpu = cfg.data.get('workers_per_gpu', 1) 31 | if 'train_dataloader' in cfg.data: 32 | workers_per_gpu = \ 33 | max(cfg.data.train_dataloader.get('workers_per_gpu', 1), 34 | workers_per_gpu) 35 | 36 | if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: 37 | omp_num_threads = 1 38 | warnings.warn( 39 | f'Setting OMP_NUM_THREADS environment variable for each process ' 40 | f'to be {omp_num_threads} in default, to avoid your system being ' 41 | f'overloaded, please further tune the variable for optimal ' 42 | f'performance in your application as needed.') 43 | os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) 44 | 45 | # setup MKL threads 46 | if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: 47 | mkl_num_threads = 1 48 | warnings.warn( 49 | f'Setting MKL_NUM_THREADS environment variable for each process ' 50 | f'to be {mkl_num_threads} in default, to avoid your system being ' 51 | f'overloaded, please further tune the variable for optimal ' 52 | f'performance in your application as needed.') 53 | os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) 54 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/models/pool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import Module, Linear, 
Sequential 3 | 4 | import numpy as np 5 | 6 | from .utils import FullyConnected 7 | 8 | 9 | class GPool(Module): 10 | 11 | def __init__(self, n, dim, use_mlp=False, mlp_activation='relu'): 12 | super().__init__() 13 | self.use_mlp = use_mlp 14 | if use_mlp: 15 | self.pre = Sequential( 16 | FullyConnected(dim, dim // 2, bias=True, activation=mlp_activation), 17 | FullyConnected(dim // 2, dim // 4, bias=True, activation=mlp_activation), 18 | ) 19 | self.p = Linear(dim // 4, 1, bias=True) 20 | else: 21 | self.p = Linear(dim, 1, bias=True) 22 | self.n = n 23 | 24 | def forward(self, pos, x): 25 | # pos : B * N * 3 26 | # x : B * N * Fin 27 | batchsize = x.size(0) 28 | if self.n < 1: 29 | k = int(x.size(1) * self.n) 30 | else: 31 | k = self.n 32 | 33 | if self.use_mlp: 34 | y = self.pre(x) 35 | else: 36 | y = x 37 | 38 | y = (self.p(y) / torch.norm(self.p.weight, p='fro')).squeeze(-1) # B * N 39 | 40 | top_idx = torch.argsort(y, dim=1, descending=True)[:, 0:k] # B * k 41 | y = torch.gather(y, dim=1, index=top_idx) # B * k 42 | y = torch.sigmoid(y) 43 | 44 | pos = torch.gather(pos, dim=1, index=top_idx.unsqueeze(-1).expand(batchsize, k, 3)) 45 | x = torch.gather(x, dim=1, index=top_idx.unsqueeze(-1).expand(batchsize, k, x.size(-1))) 46 | x = x * y.unsqueeze(-1).expand_as(x) 47 | 48 | return top_idx, pos, x 49 | 50 | 51 | class RandomPool(Module): 52 | 53 | def __init__(self, n): 54 | super().__init__() 55 | self.n = n 56 | 57 | def get_choice(self, batch, num_points): 58 | if self.n < 1: 59 | n = int(num_points * self.n) 60 | else: 61 | n = self.n 62 | choice = np.arange(0, num_points) 63 | np.random.shuffle(choice) 64 | choice = torch.from_numpy(choice[:n]).long() 65 | 66 | return choice.unsqueeze(0).repeat(batch, 1) 67 | 68 | def forward(self, pos, x): 69 | B, N, _ = pos.size() 70 | idx = self.get_choice(B, N).to(device=pos.device) # (B, K) 71 | 72 | pos = torch.gather(pos, dim=1, index=idx.unsqueeze(-1).repeat(1, 1, 3)) 73 | x = torch.gather(x, dim=1, index=idx.unsqueeze(-1).repeat(1, 1, x.size(-1))) 74 | 75 | return idx, pos, x 76 | 77 | 78 | if __name__ == '__main__': 79 | 80 | pool = RandomPool(100) 81 | print(pool.get_choice(2, 20)) 82 | -------------------------------------------------------------------------------- /mmdet3d/core/bbox/transforms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | 5 | def bbox3d_mapping_back(bboxes, scale_factor, flip_horizontal, flip_vertical): 6 | """Map bboxes from testing scale to original image scale. 7 | 8 | Args: 9 | bboxes (:obj:`BaseInstance3DBoxes`): Boxes to be mapped back. 10 | scale_factor (float): Scale factor. 11 | flip_horizontal (bool): Whether to flip horizontally. 12 | flip_vertical (bool): Whether to flip vertically. 13 | 14 | Returns: 15 | :obj:`BaseInstance3DBoxes`: Boxes mapped back. 16 | """ 17 | new_bboxes = bboxes.clone() 18 | if flip_horizontal: 19 | new_bboxes.flip('horizontal') 20 | if flip_vertical: 21 | new_bboxes.flip('vertical') 22 | new_bboxes.scale(1 / scale_factor) 23 | 24 | return new_bboxes 25 | 26 | 27 | def bbox3d2roi(bbox_list): 28 | """Convert a list of bounding boxes to roi format. 29 | 30 | Args: 31 | bbox_list (list[torch.Tensor]): A list of bounding boxes 32 | corresponding to a batch of images. 33 | 34 | Returns: 35 | torch.Tensor: Region of interests in shape (n, c), where 36 | the channels are in order of [batch_ind, x, y ...]. 
37 | """ 38 | rois_list = [] 39 | for img_id, bboxes in enumerate(bbox_list): 40 | if bboxes.size(0) > 0: 41 | img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) 42 | rois = torch.cat([img_inds, bboxes], dim=-1) 43 | else: 44 | rois = torch.zeros_like(bboxes) 45 | rois_list.append(rois) 46 | rois = torch.cat(rois_list, 0) 47 | return rois 48 | 49 | 50 | def bbox3d2result(bboxes, scores, labels, attrs=None): 51 | """Convert detection results to a list of numpy arrays. 52 | 53 | Args: 54 | bboxes (torch.Tensor): Bounding boxes with shape (N, 5). 55 | labels (torch.Tensor): Labels with shape (N, ). 56 | scores (torch.Tensor): Scores with shape (N, ). 57 | attrs (torch.Tensor, optional): Attributes with shape (N, ). 58 | Defaults to None. 59 | 60 | Returns: 61 | dict[str, torch.Tensor]: Bounding box results in cpu mode. 62 | 63 | - boxes_3d (torch.Tensor): 3D boxes. 64 | - scores (torch.Tensor): Prediction scores. 65 | - labels_3d (torch.Tensor): Box labels. 66 | - attrs_3d (torch.Tensor, optional): Box attributes. 67 | """ 68 | result_dict = dict( 69 | boxes_3d=bboxes.to('cpu'), 70 | scores_3d=scores.cpu(), 71 | labels_3d=labels.cpu()) 72 | 73 | if attrs is not None: 74 | result_dict['attrs_3d'] = attrs.cpu() 75 | 76 | return result_dict 77 | -------------------------------------------------------------------------------- /mmdet3d/models/detectors/single_stage.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..builder import DETECTORS, build_backbone, build_head, build_neck 3 | from .base import Base3DDetector 4 | 5 | 6 | @DETECTORS.register_module() 7 | class SingleStage3DDetector(Base3DDetector): 8 | """SingleStage3DDetector. 9 | 10 | This class serves as a base class for single-stage 3D detectors. 11 | 12 | Args: 13 | backbone (dict): Config dict of detector's backbone. 14 | neck (dict, optional): Config dict of neck. Defaults to None. 15 | bbox_head (dict, optional): Config dict of box head. Defaults to None. 16 | train_cfg (dict, optional): Config dict of training hyper-parameters. 17 | Defaults to None. 18 | test_cfg (dict, optional): Config dict of test hyper-parameters. 19 | Defaults to None. 20 | pretrained (str, optional): Path of pretrained models. 21 | Defaults to None. 22 | """ 23 | 24 | def __init__(self, 25 | backbone, 26 | neck=None, 27 | bbox_head=None, 28 | train_cfg=None, 29 | test_cfg=None, 30 | init_cfg=None, 31 | pretrained=None): 32 | super(SingleStage3DDetector, self).__init__(init_cfg) 33 | self.backbone = build_backbone(backbone) 34 | if neck is not None: 35 | self.neck = build_neck(neck) 36 | bbox_head.update(train_cfg=train_cfg) 37 | bbox_head.update(test_cfg=test_cfg) 38 | self.bbox_head = build_head(bbox_head) 39 | self.train_cfg = train_cfg 40 | self.test_cfg = test_cfg 41 | 42 | def forward_dummy(self, points): 43 | """Used for computing network flops. 44 | 45 | See `mmdetection/tools/analysis_tools/get_flops.py` 46 | """ 47 | x = self.extract_feat(points) 48 | try: 49 | sample_mod = self.train_cfg.sample_mod 50 | outs = self.bbox_head(x, sample_mod) 51 | except AttributeError: 52 | outs = self.bbox_head(x) 53 | return outs 54 | 55 | def extract_feat(self, points, img_metas=None): 56 | """Directly extract features from the backbone+neck. 57 | 58 | Args: 59 | points (torch.Tensor): Input points. 
60 | """ 61 | x = self.backbone(points) 62 | if self.with_neck: 63 | x = self.neck(x) 64 | return x 65 | 66 | def extract_feats(self, points, img_metas): 67 | """Extract features of multiple samples.""" 68 | return [ 69 | self.extract_feat(pts, img_meta) 70 | for pts, img_meta in zip(points, img_metas) 71 | ] 72 | -------------------------------------------------------------------------------- /mmdet3d/core/points/cam_points.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base_points import BasePoints 3 | 4 | 5 | class CameraPoints(BasePoints): 6 | """Points of instances in CAM coordinates. 7 | 8 | Args: 9 | tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. 10 | points_dim (int, optional): Number of the dimension of a point. 11 | Each row is (x, y, z). Defaults to 3. 12 | attribute_dims (dict, optional): Dictionary to indicate the 13 | meaning of extra dimension. Defaults to None. 14 | 15 | Attributes: 16 | tensor (torch.Tensor): Float matrix of N x points_dim. 17 | points_dim (int): Integer indicating the dimension of a point. 18 | Each row is (x, y, z, ...). 19 | attribute_dims (bool): Dictionary to indicate the meaning of extra 20 | dimension. Defaults to None. 21 | rotation_axis (int): Default rotation axis for points rotation. 22 | """ 23 | 24 | def __init__(self, tensor, points_dim=3, attribute_dims=None): 25 | super(CameraPoints, self).__init__( 26 | tensor, points_dim=points_dim, attribute_dims=attribute_dims) 27 | self.rotation_axis = 1 28 | 29 | def flip(self, bev_direction='horizontal'): 30 | """Flip the points along given BEV direction. 31 | 32 | Args: 33 | bev_direction (str): Flip direction (horizontal or vertical). 34 | """ 35 | if bev_direction == 'horizontal': 36 | self.tensor[:, 0] = -self.tensor[:, 0] 37 | elif bev_direction == 'vertical': 38 | self.tensor[:, 2] = -self.tensor[:, 2] 39 | 40 | @property 41 | def bev(self): 42 | """torch.Tensor: BEV of the points in shape (N, 2).""" 43 | return self.tensor[:, [0, 2]] 44 | 45 | def convert_to(self, dst, rt_mat=None): 46 | """Convert self to ``dst`` mode. 47 | 48 | Args: 49 | dst (:obj:`CoordMode`): The target Point mode. 50 | rt_mat (np.ndarray | torch.Tensor, optional): The rotation and 51 | translation matrix between different coordinates. 52 | Defaults to None. 53 | The conversion from `src` coordinates to `dst` coordinates 54 | usually comes along the change of sensors, e.g., from camera 55 | to LiDAR. This requires a transformation matrix. 56 | 57 | Returns: 58 | :obj:`BasePoints`: The converted point of the same type 59 | in the `dst` mode. 
60 | """ 61 | from mmdet3d.core.bbox import Coord3DMode 62 | return Coord3DMode.convert_point( 63 | point=self, src=Coord3DMode.CAM, dst=dst, rt_mat=rt_mat) 64 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/models/conv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import Module, Linear, ModuleList 3 | import torch.nn.functional as F 4 | 5 | from .utils import * 6 | 7 | 8 | class DenseEdgeConv(Module): 9 | 10 | def __init__(self, in_channels, num_layers, layer_out_dim, knn=16, aggr='max', activation='relu'): 11 | super().__init__() 12 | self.in_channels = in_channels 13 | self.knn = knn 14 | assert num_layers > 2 15 | self.num_layers = num_layers 16 | self.layer_out_dim = layer_out_dim 17 | 18 | # Densely Connected Layers 19 | self.layer_first = FullyConnected(3*in_channels, layer_out_dim, bias=True, activation=activation) 20 | self.layer_last = FullyConnected(in_channels + (num_layers - 1) * layer_out_dim, layer_out_dim, bias=True, activation=None) 21 | self.layers = ModuleList() 22 | for i in range(1, num_layers-1): 23 | self.layers.append(FullyConnected(in_channels + i * layer_out_dim, layer_out_dim, bias=True, activation=activation)) 24 | 25 | self.aggr = Aggregator(aggr) 26 | 27 | @property 28 | def out_channels(self): 29 | return self.in_channels + self.num_layers * self.layer_out_dim 30 | 31 | def get_edge_feature(self, x, knn_idx): 32 | """ 33 | :param x: (B, N, d) 34 | :param knn_idx: (B, N, K) 35 | :return (B, N, K, 2*d) 36 | """ 37 | knn_feat = group(x, knn_idx) # B * N * K * d 38 | x_tiled = x.unsqueeze(-2).expand_as(knn_feat) 39 | edge_feat = torch.cat([x_tiled, knn_feat, knn_feat - x_tiled], dim=3) 40 | return edge_feat 41 | 42 | def forward(self, x, pos): 43 | """ 44 | :param x: (B, N, d) 45 | :return (B, N, d+L*c) 46 | """ 47 | knn_idx = get_knn_idx(pos, pos, k=self.knn, offset=1) 48 | 49 | # First Layer 50 | edge_feat = self.get_edge_feature(x, knn_idx) 51 | y = torch.cat([ 52 | self.layer_first(edge_feat), # (B, N, K, c) 53 | x.unsqueeze(-2).repeat(1, 1, self.knn, 1) # (B, N, K, d) 54 | ], dim=-1) # (B, N, K, d+c) 55 | 56 | # Intermediate Layers 57 | for layer in self.layers: 58 | y = torch.cat([ 59 | layer(y), # (B, N, K, c) 60 | y, # (B, N, K, c+d) 61 | ], dim=-1) # (B, N, K, d+c+...) 62 | 63 | # Last Layer 64 | y = torch.cat([ 65 | self.layer_last(y), # (B, N, K, c) 66 | y # (B, N, K, d+(L-1)*c) 67 | ], dim=-1) # (B, N, K, d+L*c) 68 | 69 | # Pooling 70 | y = self.aggr(y, dim=-2) 71 | 72 | return y 73 | -------------------------------------------------------------------------------- /mmdet3d/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmdet.datasets.builder import build_dataloader 3 | from .builder import DATASETS, PIPELINES, build_dataset 4 | from .custom_3d import Custom3DDataset 5 | from .custom_3d_seg import Custom3DSegDataset 6 | from .kitti_dataset import KittiDataset 7 | from .kitti_mono_dataset import KittiMonoDataset 8 | from .lyft_dataset import LyftDataset 9 | from .nuscenes_dataset import NuScenesDataset 10 | from .nuscenes_mono_dataset import NuScenesMonoDataset 11 | # yapf: disable 12 | from .pipelines import (AffineResize, BackgroundPointsFilter, GlobalAlignment, 13 | GlobalRotScaleTrans, IndoorPatchPointSample, 14 | IndoorPointSample, LoadAnnotations3D, 15 | LoadPointsFromDict, LoadPointsFromFile, 16 | LoadPointsFromMultiSweeps, MultiViewWrapper, 17 | NormalizePointsColor, ObjectNameFilter, ObjectNoise, 18 | ObjectRangeFilter, ObjectSample, PointSample, 19 | PointShuffle, PointsRangeFilter, RandomDropPointsColor, 20 | RandomFlip3D, RandomJitterPoints, RandomRotate, 21 | RandomShiftScale, RangeLimitedRandomCrop, 22 | VoxelBasedPointSampler) 23 | # yapf: enable 24 | from .s3dis_dataset import S3DISDataset, S3DISSegDataset 25 | from .scannet_dataset import (ScanNetDataset, ScanNetInstanceSegDataset, 26 | ScanNetSegDataset) 27 | from .semantickitti_dataset import SemanticKITTIDataset 28 | from .sunrgbd_dataset import SUNRGBDDataset 29 | from .utils import get_loading_pipeline 30 | from .waymo_dataset import WaymoDataset 31 | 32 | __all__ = [ 33 | 'KittiDataset', 'KittiMonoDataset', 'build_dataloader', 'DATASETS', 34 | 'build_dataset', 'NuScenesDataset', 'NuScenesMonoDataset', 'LyftDataset', 35 | 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', 36 | 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 37 | 'LoadPointsFromFile', 'S3DISSegDataset', 'S3DISDataset', 38 | 'NormalizePointsColor', 'IndoorPatchPointSample', 'IndoorPointSample', 39 | 'PointSample', 'LoadAnnotations3D', 'GlobalAlignment', 'SUNRGBDDataset', 40 | 'ScanNetDataset', 'ScanNetSegDataset', 'ScanNetInstanceSegDataset', 41 | 'SemanticKITTIDataset', 'Custom3DDataset', 'Custom3DSegDataset', 42 | 'LoadPointsFromMultiSweeps', 'WaymoDataset', 'BackgroundPointsFilter', 43 | 'VoxelBasedPointSampler', 'get_loading_pipeline', 'RandomDropPointsColor', 44 | 'RandomJitterPoints', 'ObjectNameFilter', 'AffineResize', 45 | 'RandomShiftScale', 'LoadPointsFromDict', 'PIPELINES', 46 | 'RangeLimitedRandomCrop', 'RandomRotate', 'MultiViewWrapper' 47 | ] 48 | -------------------------------------------------------------------------------- /mmdet3d/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from mmcv.ops import (RoIAlign, SigmoidFocalLoss, get_compiler_version, 3 | get_compiling_cuda_version, nms, roi_align, 4 | sigmoid_focal_loss) 5 | from mmcv.ops.assign_score_withk import assign_score_withk 6 | from mmcv.ops.ball_query import ball_query 7 | from mmcv.ops.furthest_point_sample import (furthest_point_sample, 8 | furthest_point_sample_with_dist) 9 | from mmcv.ops.gather_points import gather_points 10 | from mmcv.ops.group_points import GroupAll, QueryAndGroup, grouping_operation 11 | from mmcv.ops.knn import knn 12 | from mmcv.ops.points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu, 13 | points_in_boxes_part) 14 | from mmcv.ops.points_sampler import PointsSampler as Points_Sampler 15 | from mmcv.ops.roiaware_pool3d import RoIAwarePool3d 16 | from mmcv.ops.roipoint_pool3d import RoIPointPool3d 17 | from mmcv.ops.scatter_points import DynamicScatter, dynamic_scatter 18 | from mmcv.ops.three_interpolate import three_interpolate 19 | from mmcv.ops.three_nn import three_nn 20 | from mmcv.ops.voxelize import Voxelization, voxelization 21 | 22 | from .dgcnn_modules import DGCNNFAModule, DGCNNFPModule, DGCNNGFModule 23 | from .norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d 24 | from .paconv import PAConv, PAConvCUDA 25 | from .pointnet_modules import (PAConvCUDASAModule, PAConvCUDASAModuleMSG, 26 | PAConvSAModule, PAConvSAModuleMSG, 27 | PointFPModule, PointSAModule, PointSAModuleMSG, 28 | build_sa_module) 29 | from .sparse_block import (SparseBasicBlock, SparseBottleneck, 30 | make_sparse_convmodule) 31 | 32 | __all__ = [ 33 | 'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'get_compiler_version', 34 | 'get_compiling_cuda_version', 'NaiveSyncBatchNorm1d', 35 | 'NaiveSyncBatchNorm2d', 'batched_nms', 'Voxelization', 'voxelization', 36 | 'dynamic_scatter', 'DynamicScatter', 'sigmoid_focal_loss', 37 | 'SigmoidFocalLoss', 'SparseBasicBlock', 'SparseBottleneck', 38 | 'RoIAwarePool3d', 'points_in_boxes_part', 'points_in_boxes_cpu', 39 | 'make_sparse_convmodule', 'ball_query', 'knn', 'furthest_point_sample', 40 | 'furthest_point_sample_with_dist', 'three_interpolate', 'three_nn', 41 | 'gather_points', 'grouping_operation', 'GroupAll', 'QueryAndGroup', 42 | 'PointSAModule', 'PointSAModuleMSG', 'PointFPModule', 'DGCNNFPModule', 43 | 'DGCNNGFModule', 'DGCNNFAModule', 'points_in_boxes_all', 44 | 'get_compiler_version', 'assign_score_withk', 'get_compiling_cuda_version', 45 | 'Points_Sampler', 'build_sa_module', 'PAConv', 'PAConvCUDA', 46 | 'PAConvSAModuleMSG', 'PAConvSAModule', 'PAConvCUDASAModule', 47 | 'PAConvCUDASAModuleMSG', 'RoIPointPool3d' 48 | ] 49 | -------------------------------------------------------------------------------- /deephub/detection_model/heads/anchor3d_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.runner import BaseModule 4 | from torch import nn as nn 5 | 6 | from mmdet.core import multi_apply 7 | 8 | 9 | class Anchor3DHead(BaseModule): 10 | """Anchor head for SECOND/PointPillars/MVXNet/PartA2. 11 | 12 | Args: 13 | num_classes (int): Number of classes. 14 | in_channels (int): Number of channels in the input feature map. 15 | feat_channels (int): Number of channels of the feature map. 16 | use_direction_classifier (bool): Whether to add a direction classifier. 
17 | """ 18 | 19 | def __init__(self, 20 | num_classes, 21 | in_channels, 22 | feat_channels=256, 23 | use_direction_classifier=True, 24 | init_cfg=None): 25 | super().__init__(init_cfg=init_cfg) 26 | self.in_channels = in_channels 27 | self.num_classes = num_classes 28 | self.feat_channels = feat_channels 29 | self.use_direction_classifier = use_direction_classifier 30 | self.num_anchors = 2 31 | self.box_code_size = 7 32 | 33 | self._init_layers() 34 | 35 | def _init_layers(self): 36 | """Initialize neural network layers of the head.""" 37 | self.cls_out_channels = self.num_anchors * self.num_classes 38 | self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) 39 | self.conv_reg = nn.Conv2d(self.feat_channels, 40 | self.num_anchors * self.box_code_size, 1) 41 | if self.use_direction_classifier: 42 | self.conv_dir_cls = nn.Conv2d(self.feat_channels, 43 | self.num_anchors * 2, 1) 44 | 45 | def forward_single(self, x): 46 | """Forward function on a single-scale feature map. 47 | 48 | Args: 49 | x (torch.Tensor): Input features. 50 | 51 | Returns: 52 | tuple[torch.Tensor]: Contain score of each class, bbox 53 | regression and direction classification predictions. 54 | """ 55 | cls_score = self.conv_cls(x) 56 | bbox_pred = self.conv_reg(x) 57 | dir_cls_preds = None 58 | if self.use_direction_classifier: 59 | dir_cls_preds = self.conv_dir_cls(x) 60 | return cls_score, bbox_pred, dir_cls_preds 61 | 62 | def forward(self, feats): 63 | """Forward pass. 64 | 65 | Args: 66 | feats (list[torch.Tensor]): Multi-level features, e.g., 67 | features produced by FPN. 68 | 69 | Returns: 70 | tuple[list[torch.Tensor]]: Multi-level class score, bbox 71 | and direction predictions. 72 | """ 73 | return multi_apply(self.forward_single, feats) 74 | -------------------------------------------------------------------------------- /mmdet3d/datasets/dataset_wrappers.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import numpy as np 3 | 4 | from .builder import DATASETS 5 | 6 | 7 | @DATASETS.register_module() 8 | class CBGSDataset(object): 9 | """A wrapper of class sampled dataset with ann_file path. Implementation of 10 | paper `Class-balanced Grouping and Sampling for Point Cloud 3D Object 11 | Detection `_. 12 | 13 | Balance the number of scenes under different classes. 14 | 15 | Args: 16 | dataset (:obj:`CustomDataset`): The dataset to be class sampled. 17 | """ 18 | 19 | def __init__(self, dataset): 20 | self.dataset = dataset 21 | self.CLASSES = dataset.CLASSES 22 | self.cat2id = {name: i for i, name in enumerate(self.CLASSES)} 23 | self.sample_indices = self._get_sample_indices() 24 | # self.dataset.data_infos = self.data_infos 25 | if hasattr(self.dataset, 'flag'): 26 | self.flag = np.array( 27 | [self.dataset.flag[ind] for ind in self.sample_indices], 28 | dtype=np.uint8) 29 | 30 | def _get_sample_indices(self): 31 | """Load annotations from ann_file. 32 | 33 | Args: 34 | ann_file (str): Path of the annotation file. 35 | 36 | Returns: 37 | list[dict]: List of annotations after class sampling. 
38 | """ 39 | class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()} 40 | for idx in range(len(self.dataset)): 41 | sample_cat_ids = self.dataset.get_cat_ids(idx) 42 | for cat_id in sample_cat_ids: 43 | class_sample_idxs[cat_id].append(idx) 44 | duplicated_samples = sum( 45 | [len(v) for _, v in class_sample_idxs.items()]) 46 | class_distribution = { 47 | k: len(v) / duplicated_samples 48 | for k, v in class_sample_idxs.items() 49 | } 50 | 51 | sample_indices = [] 52 | 53 | frac = 1.0 / len(self.CLASSES) 54 | ratios = [frac / v for v in class_distribution.values()] 55 | for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios): 56 | sample_indices += np.random.choice(cls_inds, 57 | int(len(cls_inds) * 58 | ratio)).tolist() 59 | return sample_indices 60 | 61 | def __getitem__(self, idx): 62 | """Get item from infos according to the given index. 63 | 64 | Returns: 65 | dict: Data dictionary of the corresponding index. 66 | """ 67 | ori_idx = self.sample_indices[idx] 68 | return self.dataset[ori_idx] 69 | 70 | def __len__(self): 71 | """Return the length of data infos. 72 | 73 | Returns: 74 | int: Length of data infos. 75 | """ 76 | return len(self.sample_indices) 77 | -------------------------------------------------------------------------------- /mmdet3d/core/evaluation/scannet_utils/util_3d.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | # adapted from https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/util_3d.py # noqa 3 | import json 4 | 5 | import numpy as np 6 | 7 | 8 | class Instance: 9 | """Single instance for ScanNet evaluator. 10 | 11 | Args: 12 | mesh_vert_instances (np.array): Instance ids for each point. 13 | instance_id: Id of single instance. 14 | """ 15 | instance_id = 0 16 | label_id = 0 17 | vert_count = 0 18 | med_dist = -1 19 | dist_conf = 0.0 20 | 21 | def __init__(self, mesh_vert_instances, instance_id): 22 | if instance_id == -1: 23 | return 24 | self.instance_id = int(instance_id) 25 | self.label_id = int(self.get_label_id(instance_id)) 26 | self.vert_count = int( 27 | self.get_instance_verts(mesh_vert_instances, instance_id)) 28 | 29 | @staticmethod 30 | def get_label_id(instance_id): 31 | return int(instance_id // 1000) 32 | 33 | @staticmethod 34 | def get_instance_verts(mesh_vert_instances, instance_id): 35 | return (mesh_vert_instances == instance_id).sum() 36 | 37 | def to_json(self): 38 | return json.dumps( 39 | self, default=lambda o: o.__dict__, sort_keys=True, indent=4) 40 | 41 | def to_dict(self): 42 | dict = {} 43 | dict['instance_id'] = self.instance_id 44 | dict['label_id'] = self.label_id 45 | dict['vert_count'] = self.vert_count 46 | dict['med_dist'] = self.med_dist 47 | dict['dist_conf'] = self.dist_conf 48 | return dict 49 | 50 | def from_json(self, data): 51 | self.instance_id = int(data['instance_id']) 52 | self.label_id = int(data['label_id']) 53 | self.vert_count = int(data['vert_count']) 54 | if 'med_dist' in data: 55 | self.med_dist = float(data['med_dist']) 56 | self.dist_conf = float(data['dist_conf']) 57 | 58 | def __str__(self): 59 | return '(' + str(self.instance_id) + ')' 60 | 61 | 62 | def get_instances(ids, class_ids, class_labels, id2label): 63 | """Transform gt instance mask to Instance objects. 64 | 65 | Args: 66 | ids (np.array): Instance ids for each point. 67 | class_ids: (tuple[int]): Ids of valid categories. 68 | class_labels (tuple[str]): Class names. 
69 | id2label: (dict[int, str]): Mapping of valid class id to class label. 70 | 71 | Returns: 72 | dict [str, list]: Instance objects grouped by class label. 73 | """ 74 | instances = {} 75 | for label in class_labels: 76 | instances[label] = [] 77 | instance_ids = np.unique(ids) 78 | for id in instance_ids: 79 | if id == 0: 80 | continue 81 | inst = Instance(ids, id) 82 | if inst.label_id in class_ids: 83 | instances[id2label[inst.label_id]].append(inst.to_dict()) 84 | return instances 85 | -------------------------------------------------------------------------------- /deephub/denoisy_model/dmr/utils/dataset.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data import Dataset 3 | import numpy as np 4 | import h5py 5 | 6 | class H5Dataset(Dataset): 7 | 8 | def __init__(self, h5py_filename, dataset_name, normal_name='normal', batch_size=1, transform=None): 9 | super().__init__() 10 | h5file = h5py.File(h5py_filename, mode='r') 11 | self.pointclouds = h5file[dataset_name] 12 | self.normals = h5file[normal_name] if normal_name is not None else None 13 | self.transform = transform 14 | self.batch_size = batch_size 15 | 16 | def __len__(self): 17 | return (self.pointclouds.shape[0] // self.batch_size) * self.batch_size 18 | 19 | def __getitem__(self, idx:int): 20 | item = { 21 | 'pos' : torch.FloatTensor(self.pointclouds[idx]), 22 | } 23 | if self.normals is not None: 24 | item['normal'] = torch.FloatTensor(self.normals[idx]) 25 | 26 | if self.transform is not None: 27 | item = self.transform(item) 28 | 29 | return item 30 | 31 | 32 | class MultipleH5Dataset(Dataset): 33 | 34 | def __init__(self, files, dataset_name, normal_name=None, batch_size=1, transform=None, random_get=False, subset_size=-1): 35 | super().__init__() 36 | pointclouds = [] 37 | normals = [] 38 | for filename in files: 39 | h5file = h5py.File(filename, mode='r') 40 | pointclouds.append(h5file[dataset_name]) 41 | if normal_name is not None: 42 | normals.append(h5file[normal_name]) 43 | self.pointclouds = np.concatenate(pointclouds, axis=0) 44 | self.normals = np.concatenate(normals, axis=0) if normal_name is not None else None 45 | self.transform = transform 46 | self.batch_size = batch_size 47 | self.random_get = random_get 48 | self.subset_size = subset_size 49 | 50 | def __len__(self): 51 | if self.subset_size >= self.batch_size: 52 | return (self.subset_size // self.batch_size) * self.batch_size 53 | else: 54 | return (self.pointclouds.shape[0] // self.batch_size) * self.batch_size 55 | 56 | def __getitem__(self, idx:int): 57 | if self.random_get: 58 | idx = np.random.randint(0, self.pointclouds.shape[0] - 1) 59 | 60 | item = { 61 | 'pos' : torch.FloatTensor(self.pointclouds[idx]), 62 | } 63 | if self.normals is not None: 64 | item['normal'] = torch.FloatTensor(self.normals[idx]) 65 | 66 | if self.transform is not None: 67 | item = self.transform(item) 68 | 69 | return item 70 | 71 | 72 | if __name__ == '__main__': 73 | dataset = MultipleH5Dataset([ 74 | './data/patches_10k_1024.h5', 75 | './data/patches_20k_1024.h5', 76 | './data/patches_30k_1024.h5', 77 | './data/patches_50k_1024.h5', 78 | './data/patches_80k_1024.h5'], dataset_name='train') 79 | print(dataset.pointclouds.shape) -------------------------------------------------------------------------------- /model/model_compressor/demo_dynamic_quant.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | CURRENT_DIR 
= os.path.split(os.path.abspath(__file__))[0] 5 | config_path = CURRENT_DIR.rsplit('/', 2)[0] 6 | sys.path.append(config_path) 7 | from deephub.detection_model import Pointpillars, Centerpoint 8 | from mmcv.runner import load_checkpoint 9 | from model.model_deployor.deployor_utils import create_input 10 | from nni.compression.pytorch.pruning import L1NormPruner 11 | from model.model_deployor.deployor import deploy 12 | from nni.compression.pytorch.speedup import ModelSpeedup 13 | from model.model_compressor.compressor import * 14 | 15 | import time 16 | import faulthandler;faulthandler.enable() 17 | import numpy as np 18 | import copy 19 | import torch.nn.utils.prune as prune 20 | def main(): 21 | start = time.time() 22 | model = Pointpillars() 23 | 24 | load_checkpoint(model, '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth', map_location='cpu') 25 | model.cuda() 26 | 27 | model.eval() 28 | input_names = ['voxels', 'num_points', 'coors'] 29 | output_names = ['scores', 'bbox_preds', 'dir_scores'] 30 | dynamic_axes = {'voxels': {0: 'voxels_num'}, 31 | 'num_points': {0: 'voxels_num'}, 32 | 'coors': {0: 'voxels_num'}} 33 | # dynamic_axes = None 34 | 35 | pcd = '../../test/test_model_ops/data/kitti/kitti_000008.bin' 36 | checkpoint = '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' 37 | dataset = 'kitti' 38 | model_name = 'pointpillars' 39 | device = 'cuda:0' 40 | backend = 'onnxruntime' 41 | output = 'pointpillars' 42 | fp16 = False 43 | 44 | data, model_inputs = create_input(pcd, dataset, model_name, device) 45 | 46 | backend_file = deploy(model, model_inputs, input_names, output_names, dynamic_axes, 47 | backend=backend, output_file=output, fp16=fp16, dataset=dataset) 48 | 49 | #1 dynamic quant (torch) 50 | #---------------------------------------- 51 | dynamic_quant(model) 52 | #---------------------------------------- 53 | 54 | torch_out = model(model_inputs[0], model_inputs[1], model_inputs[2]) 55 | 56 | 57 | if backend == 'onnxruntime': 58 | import onnxruntime 59 | 60 | ort_session = onnxruntime.InferenceSession(backend_file) 61 | 62 | input_dict = {} 63 | input_dict['voxels'] = model_inputs[0].cpu().numpy() 64 | input_dict['num_points'] = model_inputs[1].cpu().numpy() 65 | input_dict['coors'] = model_inputs[2].cpu().numpy() 66 | ort_output = ort_session.run(['scores', 'bbox_preds', 'dir_scores'], input_dict) 67 | 68 | outputs = {} 69 | outputs['scores'] = torch.tensor(ort_output[0]) 70 | outputs['bbox_preds'] = torch.tensor(ort_output[1]) 71 | outputs['dir_scores'] = torch.tensor(ort_output[2]) 72 | 73 | print('onnx : inference successful!') 74 | 75 | print(time.time() - start) 76 | 77 | if __name__ == '__main__': 78 | main() -------------------------------------------------------------------------------- /mmdet3d/models/losses/axis_aligned_iou_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from torch import nn as nn 4 | 5 | from mmdet.models.losses.utils import weighted_loss 6 | from ...core.bbox import AxisAlignedBboxOverlaps3D 7 | from ..builder import LOSSES 8 | 9 | 10 | @weighted_loss 11 | def axis_aligned_iou_loss(pred, target): 12 | """Calculate the IoU loss (1-IoU) of two set of axis aligned bounding 13 | boxes. Note that predictions and targets are one-to-one corresponded. 14 | 15 | Args: 16 | pred (torch.Tensor): Bbox predictions with shape [..., 3]. 
17 | target (torch.Tensor): Bbox targets (gt) with shape [..., 3]. 18 | 19 | Returns: 20 | torch.Tensor: IoU loss between predictions and targets. 21 | """ 22 | 23 | axis_aligned_iou = AxisAlignedBboxOverlaps3D()( 24 | pred, target, is_aligned=True) 25 | iou_loss = 1 - axis_aligned_iou 26 | return iou_loss 27 | 28 | 29 | @LOSSES.register_module() 30 | class AxisAlignedIoULoss(nn.Module): 31 | """Calculate the IoU loss (1-IoU) of axis aligned bounding boxes. 32 | 33 | Args: 34 | reduction (str): Method to reduce losses. 35 | The valid reduction method are none, sum or mean. 36 | loss_weight (float, optional): Weight of loss. Defaults to 1.0. 37 | """ 38 | 39 | def __init__(self, reduction='mean', loss_weight=1.0): 40 | super(AxisAlignedIoULoss, self).__init__() 41 | assert reduction in ['none', 'sum', 'mean'] 42 | self.reduction = reduction 43 | self.loss_weight = loss_weight 44 | 45 | def forward(self, 46 | pred, 47 | target, 48 | weight=None, 49 | avg_factor=None, 50 | reduction_override=None, 51 | **kwargs): 52 | """Forward function of loss calculation. 53 | 54 | Args: 55 | pred (torch.Tensor): Bbox predictions with shape [..., 3]. 56 | target (torch.Tensor): Bbox targets (gt) with shape [..., 3]. 57 | weight (torch.Tensor | float, optional): Weight of loss. 58 | Defaults to None. 59 | avg_factor (int, optional): Average factor that is used to average 60 | the loss. Defaults to None. 61 | reduction_override (str, optional): Method to reduce losses. 62 | The valid reduction method are 'none', 'sum' or 'mean'. 63 | Defaults to None. 64 | 65 | Returns: 66 | torch.Tensor: IoU loss between predictions and targets. 67 | """ 68 | assert reduction_override in (None, 'none', 'mean', 'sum') 69 | reduction = ( 70 | reduction_override if reduction_override else self.reduction) 71 | if (weight is not None) and (not torch.any(weight > 0)) and ( 72 | reduction != 'none'): 73 | return (pred * weight).sum() 74 | return axis_aligned_iou_loss( 75 | pred, 76 | target, 77 | weight=weight, 78 | avg_factor=avg_factor, 79 | reduction=reduction) * self.loss_weight 80 | -------------------------------------------------------------------------------- /mmdet3d/ops/pointnet_modules/point_fp_module.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from typing import List 3 | 4 | import torch 5 | from mmcv.cnn import ConvModule 6 | from mmcv.ops import three_interpolate, three_nn 7 | from mmcv.runner import BaseModule, force_fp32 8 | from torch import nn as nn 9 | 10 | 11 | class PointFPModule(BaseModule): 12 | """Point feature propagation module used in PointNets. 13 | 14 | Propagate the features from one set to another. 15 | 16 | Args: 17 | mlp_channels (list[int]): List of mlp channels. 18 | norm_cfg (dict, optional): Type of normalization method. 19 | Default: dict(type='BN2d'). 
20 | """ 21 | 22 | def __init__(self, 23 | mlp_channels: List[int], 24 | norm_cfg: dict = dict(type='BN2d'), 25 | init_cfg=None): 26 | super().__init__(init_cfg=init_cfg) 27 | self.fp16_enabled = False 28 | self.mlps = nn.Sequential() 29 | for i in range(len(mlp_channels) - 1): 30 | self.mlps.add_module( 31 | f'layer{i}', 32 | ConvModule( 33 | mlp_channels[i], 34 | mlp_channels[i + 1], 35 | kernel_size=(1, 1), 36 | stride=(1, 1), 37 | conv_cfg=dict(type='Conv2d'), 38 | norm_cfg=norm_cfg)) 39 | 40 | @force_fp32() 41 | def forward(self, target: torch.Tensor, source: torch.Tensor, 42 | target_feats: torch.Tensor, 43 | source_feats: torch.Tensor) -> torch.Tensor: 44 | """forward. 45 | 46 | Args: 47 | target (Tensor): (B, n, 3) tensor of the xyz positions of 48 | the target features. 49 | source (Tensor): (B, m, 3) tensor of the xyz positions of 50 | the source features. 51 | target_feats (Tensor): (B, C1, n) tensor of the features to be 52 | propagated to. 53 | source_feats (Tensor): (B, C2, m) tensor of features 54 | to be propagated. 55 | 56 | Return: 57 | Tensor: (B, M, N) M = mlp[-1], tensor of the target features. 58 | """ 59 | if source is not None: 60 | dist, idx = three_nn(target, source) 61 | dist_reciprocal = 1.0 / (dist + 1e-8) 62 | norm = torch.sum(dist_reciprocal, dim=2, keepdim=True) 63 | weight = dist_reciprocal / norm 64 | 65 | interpolated_feats = three_interpolate(source_feats, idx, weight) 66 | else: 67 | interpolated_feats = source_feats.expand(*source_feats.size()[0:2], 68 | target.size(1)) 69 | 70 | if target_feats is not None: 71 | new_features = torch.cat([interpolated_feats, target_feats], 72 | dim=1) # (B, C2 + C1, n) 73 | else: 74 | new_features = interpolated_feats 75 | 76 | new_features = new_features.unsqueeze(-1) 77 | new_features = self.mlps(new_features) 78 | 79 | return new_features.squeeze(-1) 80 | -------------------------------------------------------------------------------- /model/model_compressor/demo_torch_prune.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | CURRENT_DIR = os.path.split(os.path.abspath(__file__))[0] 5 | config_path = CURRENT_DIR.rsplit('/', 2)[0] 6 | sys.path.append(config_path) 7 | from deephub.detection_model import Pointpillars, Centerpoint 8 | from mmcv.runner import load_checkpoint 9 | from model.model_deployor.deployor_utils import create_input 10 | from nni.compression.pytorch.pruning import L1NormPruner 11 | from model.model_deployor.deployor import deploy 12 | from nni.compression.pytorch.speedup import ModelSpeedup 13 | from model.model_compressor.compressor import * 14 | 15 | import time 16 | import faulthandler;faulthandler.enable() 17 | import numpy as np 18 | import copy 19 | import torch.nn.utils.prune as prune 20 | def main(): 21 | start = time.time() 22 | model = Pointpillars() 23 | 24 | load_checkpoint(model, '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth', map_location='cpu') 25 | model.cuda() 26 | 27 | model.eval() 28 | 29 | input_names = ['voxels', 'num_points', 'coors'] 30 | output_names = ['scores', 'bbox_preds', 'dir_scores'] 31 | dynamic_axes = {'voxels': {0: 'voxels_num'}, 32 | 'num_points': {0: 'voxels_num'}, 33 | 'coors': {0: 'voxels_num'}} 34 | # dynamic_axes = None 35 | 36 | 37 | pcd = '../../test/test_model_ops/data/kitti/kitti_000008.bin' 38 | checkpoint = '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' 39 | dataset = 'kitti' 40 
| model_name = 'pointpillars' 41 | device = 'cuda:0' 42 | backend = 'onnxruntime' 43 | output = 'pointpillars' 44 | fp16 = False 45 | 46 | data, model_inputs = create_input(pcd, dataset, model_name, device) 47 | 48 | backend_file = deploy(model, model_inputs, input_names, output_names, dynamic_axes, 49 | backend=backend, output_file=output, fp16=fp16, dataset=dataset) 50 | 51 | 52 | # torch_prune 53 | #---------------------------------------- 54 | prune_list = [torch.nn.Conv2d, torch.nn.Linear] 55 | amount_list = [0.3, 0.9] 56 | 57 | torch_prune(model, prune_list, amount_list) 58 | #---------------------------------------- 59 | 60 | torch_out = model(model_inputs[0], model_inputs[1], model_inputs[2]) 61 | 62 | 63 | if backend == 'onnxruntime': 64 | import onnxruntime 65 | 66 | ort_session = onnxruntime.InferenceSession(backend_file) 67 | 68 | input_dict = {} 69 | input_dict['voxels'] = model_inputs[0].cpu().numpy() 70 | input_dict['num_points'] = model_inputs[1].cpu().numpy() 71 | input_dict['coors'] = model_inputs[2].cpu().numpy() 72 | ort_output = ort_session.run(['scores', 'bbox_preds', 'dir_scores'], input_dict) 73 | 74 | outputs = {} 75 | outputs['scores'] = torch.tensor(ort_output[0]) 76 | outputs['bbox_preds'] = torch.tensor(ort_output[1]) 77 | outputs['dir_scores'] = torch.tensor(ort_output[2]) 78 | 79 | print('onnx : inference successful!') 80 | 81 | print(time.time() - start) 82 | 83 | if __name__ == '__main__': 84 | main() -------------------------------------------------------------------------------- /data/data_visualizor/data_visualizor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | # import mayavi.mlab 3 | from os import path as osp 4 | 5 | 6 | import numpy as np 7 | 8 | from .visualization_utils import mkdir_or_exist, _write_obj, _write_oriented_bbox 9 | 10 | 11 | def lidar_visualizor(lidar_data, 12 | out_dir='./', 13 | filename='test', 14 | method='open3d', 15 | show=False, 16 | snapshot=False, 17 | pred_labels=None): 18 | """ 19 | Use different visualize methods to visualize lidar data. 
20 | Args: 21 | points: np.array 22 | method: str 23 | return: 24 | 0 25 | Reference: 26 | https://github.com/strawlab/python-pcl 27 | """ 28 | 29 | points = lidar_data['points'] 30 | pred_bboxes = lidar_data['pred_bboxes'] 31 | gt_bboxes = lidar_data['gt_bboxes'] 32 | if method == 'open3d': 33 | result_path = osp.join(out_dir, filename) 34 | mkdir_or_exist(result_path) 35 | 36 | if show: 37 | from .open3d_vis import Visualizer 38 | 39 | vis = Visualizer(points) 40 | if pred_bboxes is not None: 41 | if pred_labels is None: 42 | vis.add_bboxes(bbox3d=pred_bboxes) 43 | else: 44 | palette = np.random.randint( 45 | 0, 255, size=(pred_labels.max() + 1, 3)) / 256 46 | labelDict = {} 47 | for j in range(len(pred_labels)): 48 | i = int(pred_labels[j].numpy()) 49 | if labelDict.get(i) is None: 50 | labelDict[i] = [] 51 | labelDict[i].append(pred_bboxes[j]) 52 | for i in labelDict: 53 | vis.add_bboxes( 54 | bbox3d=np.array(labelDict[i]), 55 | bbox_color=palette[i], 56 | points_in_box_color=palette[i]) 57 | 58 | if gt_bboxes is not None: 59 | vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1)) 60 | show_path = osp.join(result_path, 61 | f'{filename}_online.png') if snapshot else None 62 | vis.show(show_path) 63 | 64 | if points is not None: 65 | _write_obj(points, osp.join(result_path, f'{filename}_points.obj')) 66 | 67 | if gt_bboxes is not None: 68 | # bottom center to gravity center 69 | gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2 70 | 71 | _write_oriented_bbox(gt_bboxes, 72 | osp.join(result_path, f'{filename}_gt.obj')) 73 | 74 | if pred_bboxes is not None: 75 | # bottom center to gravity center 76 | pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2 77 | 78 | _write_oriented_bbox(pred_bboxes, 79 | osp.join(result_path, f'{filename}_pred.obj')) 80 | 81 | return 0 82 | 83 | 84 | -------------------------------------------------------------------------------- /mmdet3d/models/model_utils/edge_fusion_module.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn import ConvModule 3 | from mmcv.runner import BaseModule 4 | from torch import nn as nn 5 | from torch.nn import functional as F 6 | 7 | 8 | class EdgeFusionModule(BaseModule): 9 | """Edge Fusion Module for feature map. 10 | 11 | Args: 12 | out_channels (int): The number of output channels. 13 | feat_channels (int): The number of channels in feature map 14 | during edge feature fusion. 15 | kernel_size (int, optional): Kernel size of convolution. 16 | Default: 3. 17 | act_cfg (dict, optional): Config of activation. 18 | Default: dict(type='ReLU'). 19 | norm_cfg (dict, optional): Config of normalization. 20 | Default: dict(type='BN1d')). 21 | """ 22 | 23 | def __init__(self, 24 | out_channels, 25 | feat_channels, 26 | kernel_size=3, 27 | act_cfg=dict(type='ReLU'), 28 | norm_cfg=dict(type='BN1d')): 29 | super().__init__() 30 | self.edge_convs = nn.Sequential( 31 | ConvModule( 32 | feat_channels, 33 | feat_channels, 34 | kernel_size=kernel_size, 35 | padding=kernel_size // 2, 36 | conv_cfg=dict(type='Conv1d'), 37 | norm_cfg=norm_cfg, 38 | act_cfg=act_cfg), 39 | nn.Conv1d(feat_channels, out_channels, kernel_size=1)) 40 | self.feat_channels = feat_channels 41 | 42 | def forward(self, features, fused_features, edge_indices, edge_lens, 43 | output_h, output_w): 44 | """Forward pass. 45 | 46 | Args: 47 | features (torch.Tensor): Different representative features 48 | for fusion. 
49 | fused_features (torch.Tensor): Different representative 50 | features to be fused. 51 | edge_indices (torch.Tensor): Batch image edge indices. 52 | edge_lens (list[int]): List of edge length of each image. 53 | output_h (int): Height of output feature map. 54 | output_w (int): Width of output feature map. 55 | 56 | Returns: 57 | torch.Tensor: Fused feature maps. 58 | """ 59 | batch_size = features.shape[0] 60 | # normalize 61 | grid_edge_indices = edge_indices.view(batch_size, -1, 1, 2).float() 62 | grid_edge_indices[..., 0] = \ 63 | grid_edge_indices[..., 0] / (output_w - 1) * 2 - 1 64 | grid_edge_indices[..., 1] = \ 65 | grid_edge_indices[..., 1] / (output_h - 1) * 2 - 1 66 | 67 | # apply edge fusion 68 | edge_features = F.grid_sample( 69 | features, grid_edge_indices, align_corners=True).squeeze(-1) 70 | edge_output = self.edge_convs(edge_features) 71 | 72 | for k in range(batch_size): 73 | edge_indice_k = edge_indices[k, :edge_lens[k]] 74 | fused_features[k, :, edge_indice_k[:, 1], 75 | edge_indice_k[:, 0]] += edge_output[ 76 | k, :, :edge_lens[k]] 77 | 78 | return fused_features 79 | -------------------------------------------------------------------------------- /mmdet3d/models/decode_heads/pointnet2_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn.bricks import ConvModule 3 | from torch import nn as nn 4 | 5 | from mmdet3d.ops import PointFPModule 6 | from ..builder import HEADS 7 | from .decode_head import Base3DDecodeHead 8 | 9 | 10 | @HEADS.register_module() 11 | class PointNet2Head(Base3DDecodeHead): 12 | r"""PointNet2 decoder head. 13 | 14 | Decoder head used in `PointNet++ `_. 15 | Refer to the `official code `_. 16 | 17 | Args: 18 | fp_channels (tuple[tuple[int]]): Tuple of mlp channels in FP modules. 19 | fp_norm_cfg (dict): Config of norm layers used in FP modules. 20 | Default: dict(type='BN2d'). 21 | """ 22 | 23 | def __init__(self, 24 | fp_channels=((768, 256, 256), (384, 256, 256), 25 | (320, 256, 128), (128, 128, 128, 128)), 26 | fp_norm_cfg=dict(type='BN2d'), 27 | **kwargs): 28 | super(PointNet2Head, self).__init__(**kwargs) 29 | 30 | self.num_fp = len(fp_channels) 31 | self.FP_modules = nn.ModuleList() 32 | for cur_fp_mlps in fp_channels: 33 | self.FP_modules.append( 34 | PointFPModule(mlp_channels=cur_fp_mlps, norm_cfg=fp_norm_cfg)) 35 | 36 | # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L40 37 | self.pre_seg_conv = ConvModule( 38 | fp_channels[-1][-1], 39 | self.channels, 40 | kernel_size=1, 41 | bias=True, 42 | conv_cfg=self.conv_cfg, 43 | norm_cfg=self.norm_cfg, 44 | act_cfg=self.act_cfg) 45 | 46 | def _extract_input(self, feat_dict): 47 | """Extract inputs from features dictionary. 48 | 49 | Args: 50 | feat_dict (dict): Feature dict from backbone. 51 | 52 | Returns: 53 | list[torch.Tensor]: Coordinates of multiple levels of points. 54 | list[torch.Tensor]: Features of multiple levels of points. 55 | """ 56 | sa_xyz = feat_dict['sa_xyz'] 57 | sa_features = feat_dict['sa_features'] 58 | assert len(sa_xyz) == len(sa_features) 59 | 60 | return sa_xyz, sa_features 61 | 62 | def forward(self, feat_dict): 63 | """Forward pass. 64 | 65 | Args: 66 | feat_dict (dict): Feature dict from backbone. 67 | 68 | Returns: 69 | torch.Tensor: Segmentation map of shape [B, num_classes, N]. 
70 | """ 71 | sa_xyz, sa_features = self._extract_input(feat_dict) 72 | 73 | # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L24 74 | sa_features[0] = None 75 | 76 | fp_feature = sa_features[-1] 77 | 78 | for i in range(self.num_fp): 79 | # consume the points in a bottom-up manner 80 | fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], 81 | sa_features[-(i + 2)], fp_feature) 82 | output = self.pre_seg_conv(fp_feature) 83 | output = self.cls_seg(output) 84 | 85 | return output 86 | -------------------------------------------------------------------------------- /model/model_compressor/demo_static_quant.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | CURRENT_DIR = os.path.split(os.path.abspath(__file__))[0] 5 | config_path = CURRENT_DIR.rsplit('/', 2)[0] 6 | sys.path.append(config_path) 7 | from deephub.detection_model import Pointpillars, Centerpoint 8 | from mmcv.runner import load_checkpoint 9 | from model.model_deployor.deployor_utils import create_input 10 | from nni.compression.pytorch.pruning import L1NormPruner 11 | from model.model_deployor.deployor import deploy 12 | from nni.compression.pytorch.speedup import ModelSpeedup 13 | from model.model_compressor.compressor import * 14 | 15 | import time 16 | import faulthandler;faulthandler.enable() 17 | import numpy as np 18 | import copy 19 | import torch.nn.utils.prune as prune 20 | def main(): 21 | start = time.time() 22 | model = Pointpillars() 23 | 24 | load_checkpoint(model, '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth', map_location='cpu') 25 | model.cpu() 26 | 27 | model.eval() 28 | 29 | input_names = ['voxels', 'num_points', 'coors'] 30 | output_names = ['scores', 'bbox_preds', 'dir_scores'] 31 | dynamic_axes = {'voxels': {0: 'voxels_num'}, 32 | 'num_points': {0: 'voxels_num'}, 33 | 'coors': {0: 'voxels_num'}} 34 | # dynamic_axes = None 35 | 36 | pcd = '../../test/test_model_ops/data/kitti/kitti_000008.bin' 37 | checkpoint = '../../checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth' 38 | dataset = 'kitti' 39 | model_name = 'pointpillars' 40 | device = 'cpu' 41 | backend = 'onnxruntime' 42 | output = 'pointpillars' 43 | fp16 = False 44 | 45 | data, model_inputs = create_input(pcd, dataset, model_name, device) 46 | 47 | backend_file = deploy(model, model_inputs, input_names, output_names, dynamic_axes, 48 | backend=backend, output_file=output, fp16=fp16, dataset=dataset) 49 | 50 | #2 static quant (torch) 51 | #---------------------------------------- 52 | 53 | input_data = [model_inputs[0], model_inputs[1], model_inputs[2]] 54 | 55 | model_int8 = static_quant(model, input_data) 56 | 57 | torch_out = model_int8(model_inputs[0], model_inputs[1], model_inputs[2]) 58 | #---------------------------------------- 59 | 60 | 61 | # torch_out = model(model_inputs[0], model_inputs[1], model_inputs[2]) 62 | 63 | 64 | if backend == 'onnxruntime': 65 | import onnxruntime 66 | 67 | ort_session = onnxruntime.InferenceSession(backend_file) 68 | 69 | input_dict = {} 70 | input_dict['voxels'] = model_inputs[0].cpu().numpy() 71 | input_dict['num_points'] = model_inputs[1].cpu().numpy() 72 | input_dict['coors'] = model_inputs[2].cpu().numpy() 73 | ort_output = ort_session.run(['scores', 'bbox_preds', 'dir_scores'], input_dict) 74 | 75 | outputs = {} 76 | outputs['scores'] = torch.tensor(ort_output[0]) 77 | outputs['bbox_preds'] = 
torch.tensor(ort_output[1]) 78 | outputs['dir_scores'] = torch.tensor(ort_output[2]) 79 | 80 | print('onnx : inference successful!') 81 | 82 | print(time.time() - start) 83 | 84 | if __name__ == '__main__': 85 | main() -------------------------------------------------------------------------------- /deephub/detection_model/middle_encoders/pillar_scatter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | from mmcv.runner import auto_fp16 4 | from torch import nn 5 | 6 | 7 | class PointPillarsScatter(nn.Module): 8 | """Point Pillar's Scatter. 9 | 10 | Converts learned features from dense tensor to sparse pseudo image. 11 | 12 | Args: 13 | in_channels (int): Channels of input features. 14 | output_shape (list[int]): Required output shape of features. 15 | """ 16 | 17 | def __init__(self, in_channels, output_shape): 18 | super().__init__() 19 | self.output_shape = output_shape 20 | self.ny = output_shape[0] 21 | self.nx = output_shape[1] 22 | self.in_channels = in_channels 23 | self.fp16_enabled = False 24 | 25 | @auto_fp16(apply_to=('voxel_features', )) 26 | def forward(self, voxel_features, coors, batch_size=None): 27 | """Foraward function to scatter features.""" 28 | # TODO: rewrite the function in a batch manner 29 | # no need to deal with different batch cases 30 | if batch_size is not None: 31 | return self.forward_batch(voxel_features, coors, batch_size) 32 | else: 33 | return self.forward_single(voxel_features, coors) 34 | 35 | def forward_single(self, voxel_features, coors): 36 | """Scatter features of single sample. 37 | 38 | Args: 39 | voxel_features (torch.Tensor): Voxel features in shape (N, M, C). 40 | coors (torch.Tensor): Coordinates of each voxel. 41 | The first column indicates the sample ID. 42 | """ 43 | # Create the canvas for this sample 44 | canvas = torch.zeros( 45 | self.in_channels, 46 | self.nx * self.ny, 47 | dtype=voxel_features.dtype, 48 | device=voxel_features.device) 49 | 50 | indices = coors[:, 2] * self.nx + coors[:, 3] 51 | indices = indices.long() 52 | voxels = voxel_features.t() 53 | # Now scatter the blob back to the canvas. 54 | canvas[:, indices] = voxels 55 | # Undo the column stacking to final 4-dim tensor 56 | canvas = canvas.view(1, self.in_channels, self.ny, self.nx) 57 | return canvas 58 | 59 | def forward_batch(self, voxel_features, coors, batch_size=1): 60 | """Scatter features of single sample. 61 | 62 | Args: 63 | voxel_features (torch.Tensor): Voxel features from voxel encoder layer. 64 | coors (torch.Tensor): Coordinates of each voxel. 65 | The first column indicates the sample ID. 66 | batch_size (int): Number of samples in the current batch. 67 | """ 68 | canvas = torch.zeros( 69 | self.in_channels, 70 | self.nx * self.ny, 71 | dtype=voxel_features.dtype, 72 | device=voxel_features.device) 73 | 74 | indices = coors[:, 2] * self.nx + coors[:, 3] 75 | indices = indices.long() 76 | voxels = voxel_features.t() 77 | # Now scatter the blob back to the canvas. 
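# NOTE: this deployment-oriented rewrite keeps a single canvas and ignores the sample index in coors[:, 0], so it effectively assumes batch_size == 1 (mirroring forward_single above).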
78 | canvas[:, indices] = voxels 79 | # Undo the column stacking to final 4-dim tensor 80 | canvas = canvas.view(1, self.in_channels, self.ny, self.nx) 81 | return canvas 82 | -------------------------------------------------------------------------------- /model/model_deployor/deployor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import importlib 3 | if importlib.util.find_spec('tensorrt') is not None: 4 | from model.model_deployor.onnx2tensorrt import create_trt_engine, save_trt_engine 5 | 6 | trt_input_shapes = { 7 | 'kitti': { 8 | 'voxels': { 9 | 'min_shape': [2000, 32, 4], 10 | 'opt_shape': [5000, 32, 4], 11 | 'max_shape': [9000, 32, 4] 12 | }, 13 | 'num_points': { 14 | 'min_shape': [2000], 15 | 'opt_shape': [5000], 16 | 'max_shape': [9000] 17 | }, 18 | 'coors': { 19 | 'min_shape': [2000, 4], 20 | 'opt_shape': [5000, 4], 21 | 'max_shape': [9000, 4] 22 | } 23 | }, 24 | 'nuscenes': { 25 | 'voxels': { 26 | 'min_shape': [5000, 20, 4], 27 | 'opt_shape': [20000, 20, 4], 28 | 'max_shape': [30000, 20, 4] 29 | }, 30 | 'num_points': { 31 | 'min_shape': [5000], 32 | 'opt_shape': [20000], 33 | 'max_shape': [30000] 34 | }, 35 | 'coors': { 36 | 'min_shape': [5000, 4], 37 | 'opt_shape': [20000, 4], 38 | 'max_shape': [30000, 4] 39 | } 40 | } 41 | } 42 | 43 | def deploy(model, 44 | model_inputs, 45 | input_names, 46 | output_names, 47 | dynamic_axes, 48 | backend='onnxruntime', 49 | output_file='end2end', 50 | verbose=False, 51 | fp16=False, 52 | dataset='kitti'): 53 | """ 54 | Deploy pytorch model to different backends. 55 | Args: 56 | model: torch.nn.module 57 | model_inputs: tensor 58 | input_names: deployment model input names 59 | output_names: deployment model output names 60 | dynamic_axes: specifies the dynamic dimension of the deployment model 61 | backend: specify convert backend 62 | output_file: output file name 63 | fp16: TensorRT fp16 64 | dataset: dataset name 65 | Return: 66 | backend file name 67 | Reference: 68 | https://github.com/open-mmlab/mmdeploy/blob/master/tools/deploy.py 69 | """ 70 | assert backend in ['onnxruntime', 'tensorrt'], 'This backend isn\'t supported now!' 71 | 72 | output_file = output_file + '.onnx' 73 | torch.onnx.export( 74 | model, 75 | model_inputs, 76 | output_file, 77 | export_params=True, 78 | input_names=input_names, 79 | output_names=output_names, 80 | opset_version=11, 81 | dynamic_axes=dynamic_axes, 82 | keep_initializers_as_inputs=False, 83 | verbose=verbose) 84 | if backend == 'onnxruntime': 85 | return output_file 86 | if backend == 'tensorrt': 87 | engine = create_trt_engine( 88 | output_file, 89 | input_shapes=trt_input_shapes[dataset], 90 | fp16_mode=fp16, 91 | int8_mode=False, 92 | int8_param={}, 93 | max_workspace_size=1073741824, 94 | device_id=0) 95 | output_file = output_file.replace('onnx', 'trt') 96 | save_trt_engine(engine, output_file) 97 | return output_file -------------------------------------------------------------------------------- /mmdet3d/models/roi_heads/base_3droi_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from abc import ABCMeta, abstractmethod 3 | 4 | from mmcv.runner import BaseModule 5 | 6 | 7 | class Base3DRoIHead(BaseModule, metaclass=ABCMeta): 8 | """Base class for 3d RoIHeads.""" 9 | 10 | def __init__(self, 11 | bbox_head=None, 12 | mask_roi_extractor=None, 13 | mask_head=None, 14 | train_cfg=None, 15 | test_cfg=None, 16 | pretrained=None, 17 | init_cfg=None): 18 | super(Base3DRoIHead, self).__init__(init_cfg=init_cfg) 19 | self.train_cfg = train_cfg 20 | self.test_cfg = test_cfg 21 | 22 | if bbox_head is not None: 23 | self.init_bbox_head(bbox_head) 24 | 25 | if mask_head is not None: 26 | self.init_mask_head(mask_roi_extractor, mask_head) 27 | 28 | self.init_assigner_sampler() 29 | 30 | @property 31 | def with_bbox(self): 32 | """bool: whether the RoIHead has box head""" 33 | return hasattr(self, 'bbox_head') and self.bbox_head is not None 34 | 35 | @property 36 | def with_mask(self): 37 | """bool: whether the RoIHead has mask head""" 38 | return hasattr(self, 'mask_head') and self.mask_head is not None 39 | 40 | @abstractmethod 41 | def init_bbox_head(self): 42 | """Initialize the box head.""" 43 | pass 44 | 45 | @abstractmethod 46 | def init_mask_head(self): 47 | """Initialize maek head.""" 48 | pass 49 | 50 | @abstractmethod 51 | def init_assigner_sampler(self): 52 | """Initialize assigner and sampler.""" 53 | pass 54 | 55 | @abstractmethod 56 | def forward_train(self, 57 | x, 58 | img_metas, 59 | proposal_list, 60 | gt_bboxes, 61 | gt_labels, 62 | gt_bboxes_ignore=None, 63 | **kwargs): 64 | """Forward function during training. 65 | 66 | Args: 67 | x (dict): Contains features from the first stage. 68 | img_metas (list[dict]): Meta info of each image. 69 | proposal_list (list[dict]): Proposal information from rpn. 70 | gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): 71 | GT bboxes of each sample. The bboxes are encapsulated 72 | by 3D box structures. 73 | gt_labels (list[torch.LongTensor]): GT labels of each sample. 74 | gt_bboxes_ignore (list[torch.Tensor], optional): 75 | Ground truth boxes to be ignored. 76 | 77 | Returns: 78 | dict[str, torch.Tensor]: Losses from each head. 79 | """ 80 | pass 81 | 82 | def simple_test(self, 83 | x, 84 | proposal_list, 85 | img_metas, 86 | proposals=None, 87 | rescale=False, 88 | **kwargs): 89 | """Test without augmentation.""" 90 | pass 91 | 92 | def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): 93 | """Test with augmentations. 94 | 95 | If rescale is False, then returned bboxes and masks will fit the scale 96 | of imgs[0]. 97 | """ 98 | pass 99 | -------------------------------------------------------------------------------- /mmdet3d/models/utils/edge_indices.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def get_edge_indices(img_metas, 7 | downsample_ratio, 8 | step=1, 9 | pad_mode='default', 10 | dtype=np.float32, 11 | device='cpu'): 12 | """Function to filter the objects label outside the image. 13 | The edge_indices are generated using numpy on cpu rather 14 | than on CUDA due to the latency issue. When batch size = 8, 15 | this function with numpy array is ~8 times faster than that 16 | with CUDA tensor (0.09s and 0.72s in 100 runs). 17 | 18 | Args: 19 | img_metas (list[dict]): Meta information of each image, e.g., 20 | image size, scaling factor, etc. 
21 | downsample_ratio (int): Downsample ratio of output feature map. 22 | step (int, optional): Step size used for generating 23 | edge indices. Default: 1. 24 | pad_mode (str, optional): Padding mode during data pipeline. 25 | Default: 'default'. 26 | dtype (torch.dtype, optional): Dtype of edge indices tensor. 27 | Default: np.float32. 28 | device (str, optional): Device of edge indices tensor. 29 | Default: 'cpu'. 30 | 31 | Returns: 32 | list[Tensor]: Edge indices for each image in batch data. 33 | """ 34 | edge_indices_list = [] 35 | for i in range(len(img_metas)): 36 | img_shape = img_metas[i]['img_shape'] 37 | pad_shape = img_metas[i]['pad_shape'] 38 | h, w = img_shape[:2] 39 | pad_h, pad_w = pad_shape 40 | edge_indices = [] 41 | 42 | if pad_mode == 'default': 43 | x_min = 0 44 | y_min = 0 45 | x_max = (w - 1) // downsample_ratio 46 | y_max = (h - 1) // downsample_ratio 47 | elif pad_mode == 'center': 48 | x_min = np.ceil((pad_w - w) / 2 * downsample_ratio) 49 | y_min = np.ceil((pad_h - h) / 2 * downsample_ratio) 50 | x_max = x_min + w // downsample_ratio 51 | y_max = y_min + h // downsample_ratio 52 | else: 53 | raise NotImplementedError 54 | 55 | # left 56 | y = np.arange(y_min, y_max, step, dtype=dtype) 57 | x = np.ones(len(y)) * x_min 58 | 59 | edge_indices_edge = np.stack((x, y), axis=1) 60 | edge_indices.append(edge_indices_edge) 61 | 62 | # bottom 63 | x = np.arange(x_min, x_max, step, dtype=dtype) 64 | y = np.ones(len(x)) * y_max 65 | 66 | edge_indices_edge = np.stack((x, y), axis=1) 67 | edge_indices.append(edge_indices_edge) 68 | 69 | # right 70 | y = np.arange(y_max, y_min, -step, dtype=dtype) 71 | x = np.ones(len(y)) * x_max 72 | 73 | edge_indices_edge = np.stack((x, y), axis=1) 74 | edge_indices.append(edge_indices_edge) 75 | 76 | # top 77 | x = np.arange(x_max, x_min, -step, dtype=dtype) 78 | y = np.ones(len(x)) * y_min 79 | 80 | edge_indices_edge = np.stack((x, y), axis=1) 81 | edge_indices.append(edge_indices_edge) 82 | 83 | edge_indices = \ 84 | np.concatenate([index for index in edge_indices], axis=0) 85 | edge_indices = torch.from_numpy(edge_indices).to(device).long() 86 | edge_indices_list.append(edge_indices) 87 | 88 | return edge_indices_list 89 | -------------------------------------------------------------------------------- /mmdet3d/models/utils/gen_keypoints.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmdet3d.core.bbox import points_cam2img 5 | 6 | 7 | def get_keypoints(gt_bboxes_3d_list, 8 | centers2d_list, 9 | img_metas, 10 | use_local_coords=True): 11 | """Function to generate projected 2D keypoints of 3D ground truth boxes. 12 | 13 | Args: 14 | gt_bboxes_3d_list (list[:obj:`BaseInstance3DBoxes`]): 3D ground truth 15 | bboxes of each image. 16 | centers2d_list (list[Tensor]): Projected 3D centers onto 2D image, 17 | shape (num_gt, 2). 18 | img_metas (list[dict]): Meta information of each image, e.g., 19 | image size, scaling factor, etc. 20 | use_local_coords (bool, optional): Whether to use local coordinates 21 | for keypoints. Default: True. 22 | 23 | Returns: 24 | tuple[list[Tensor]]: It contains two elements, the first is the 25 | keypoints for each projected 2D bbox in batch data. The second is 26 | the visible mask of depth calculated by keypoints.
27 | """ 28 | 29 | assert len(gt_bboxes_3d_list) == len(centers2d_list) 30 | bs = len(gt_bboxes_3d_list) 31 | keypoints2d_list = [] 32 | keypoints_depth_mask_list = [] 33 | 34 | for i in range(bs): 35 | gt_bboxes_3d = gt_bboxes_3d_list[i] 36 | centers2d = centers2d_list[i] 37 | img_shape = img_metas[i]['img_shape'] 38 | cam2img = img_metas[i]['cam2img'] 39 | h, w = img_shape[:2] 40 | # (N, 8, 3) 41 | corners3d = gt_bboxes_3d.corners 42 | top_centers3d = torch.mean(corners3d[:, [0, 1, 4, 5], :], dim=1) 43 | bot_centers3d = torch.mean(corners3d[:, [2, 3, 6, 7], :], dim=1) 44 | # (N, 2, 3) 45 | top_bot_centers3d = torch.stack((top_centers3d, bot_centers3d), dim=1) 46 | keypoints3d = torch.cat((corners3d, top_bot_centers3d), dim=1) 47 | # (N, 10, 2) 48 | keypoints2d = points_cam2img(keypoints3d, cam2img) 49 | 50 | # keypoints mask: keypoints must be inside 51 | # the image and in front of the camera 52 | keypoints_x_visible = (keypoints2d[..., 0] >= 0) & ( 53 | keypoints2d[..., 0] <= w - 1) 54 | keypoints_y_visible = (keypoints2d[..., 1] >= 0) & ( 55 | keypoints2d[..., 1] <= h - 1) 56 | keypoints_z_visible = (keypoints3d[..., -1] > 0) 57 | 58 | # (N, 1O) 59 | keypoints_visible = keypoints_x_visible & \ 60 | keypoints_y_visible & keypoints_z_visible 61 | # center, diag-02, diag-13 62 | keypoints_depth_valid = torch.stack( 63 | (keypoints_visible[:, [8, 9]].all(dim=1), 64 | keypoints_visible[:, [0, 3, 5, 6]].all(dim=1), 65 | keypoints_visible[:, [1, 2, 4, 7]].all(dim=1)), 66 | dim=1) 67 | keypoints_visible = keypoints_visible.float() 68 | 69 | if use_local_coords: 70 | keypoints2d = torch.cat((keypoints2d - centers2d.unsqueeze(1), 71 | keypoints_visible.unsqueeze(-1)), 72 | dim=2) 73 | else: 74 | keypoints2d = torch.cat( 75 | (keypoints2d, keypoints_visible.unsqueeze(-1)), dim=2) 76 | 77 | keypoints2d_list.append(keypoints2d) 78 | keypoints_depth_mask_list.append(keypoints_depth_valid) 79 | 80 | return (keypoints2d_list, keypoints_depth_mask_list) 81 | -------------------------------------------------------------------------------- /mmdet3d/models/dense_heads/base_mono3d_dense_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from abc import ABCMeta, abstractmethod 3 | 4 | from mmcv.runner import BaseModule 5 | 6 | 7 | class BaseMono3DDenseHead(BaseModule, metaclass=ABCMeta): 8 | """Base class for Monocular 3D DenseHeads.""" 9 | 10 | def __init__(self, init_cfg=None): 11 | super(BaseMono3DDenseHead, self).__init__(init_cfg=init_cfg) 12 | 13 | @abstractmethod 14 | def loss(self, **kwargs): 15 | """Compute losses of the head.""" 16 | pass 17 | 18 | @abstractmethod 19 | def get_bboxes(self, **kwargs): 20 | """Transform network output for a batch into bbox predictions.""" 21 | pass 22 | 23 | def forward_train(self, 24 | x, 25 | img_metas, 26 | gt_bboxes, 27 | gt_labels=None, 28 | gt_bboxes_3d=None, 29 | gt_labels_3d=None, 30 | centers2d=None, 31 | depths=None, 32 | attr_labels=None, 33 | gt_bboxes_ignore=None, 34 | proposal_cfg=None, 35 | **kwargs): 36 | """ 37 | Args: 38 | x (list[Tensor]): Features from FPN. 39 | img_metas (list[dict]): Meta information of each image, e.g., 40 | image size, scaling factor, etc. 41 | gt_bboxes (list[Tensor]): Ground truth bboxes of the image, 42 | shape (num_gts, 4). 43 | gt_labels (list[Tensor]): Ground truth labels of each box, 44 | shape (num_gts,). 45 | gt_bboxes_3d (list[Tensor]): 3D ground truth bboxes of the image, 46 | shape (num_gts, self.bbox_code_size). 
47 | gt_labels_3d (list[Tensor]): 3D ground truth labels of each box, 48 | shape (num_gts,). 49 | centers2d (list[Tensor]): Projected 3D center of each box, 50 | shape (num_gts, 2). 51 | depths (list[Tensor]): Depth of projected 3D center of each box, 52 | shape (num_gts,). 53 | attr_labels (list[Tensor]): Attribute labels of each box, 54 | shape (num_gts,). 55 | gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be 56 | ignored, shape (num_ignored_gts, 4). 57 | proposal_cfg (mmcv.Config): Test / postprocessing configuration, 58 | if None, test_cfg would be used. 59 | 60 | Returns: 61 | tuple: 62 | losses (dict[str, Tensor]): A dictionary of loss components. 63 | proposal_list (list[Tensor]): Proposals of each image. 64 | """ 65 | outs = self(x) 66 | if gt_labels is None: 67 | loss_inputs = outs + (gt_bboxes, gt_bboxes_3d, centers2d, depths, 68 | attr_labels, img_metas) 69 | else: 70 | loss_inputs = outs + (gt_bboxes, gt_labels, gt_bboxes_3d, 71 | gt_labels_3d, centers2d, depths, attr_labels, 72 | img_metas) 73 | losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) 74 | if proposal_cfg is None: 75 | return losses 76 | else: 77 | proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) 78 | return losses, proposal_list 79 | -------------------------------------------------------------------------------- /deephub/detection_model/pointpillars.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.runner import BaseModule 3 | from .voxel_encoders import PillarFeatureNet 4 | from .middle_encoders import PointPillarsScatter 5 | from .backbones import SECOND 6 | from .necks import SECONDFPN 7 | from .heads import Anchor3DHead 8 | 9 | class Pointpillars(BaseModule): 10 | """PointPillars detector assembled for deployment. 11 | 12 | The model chains a PillarFeatureNet voxel encoder, a PointPillarsScatter 13 | middle encoder, a SECOND backbone, a SECONDFPN neck and an Anchor3DHead, 14 | configured here for single-class KITTI car detection. 15 | 16 | Args: 17 | init_cfg (dict, optional): Initialization config dict. 18 | Default: None. 19 | """ 20 | 21 | def __init__(self, 22 | init_cfg=None): 23 | super(Pointpillars, self).__init__(init_cfg=init_cfg) 24 | self.voxel_encoder = PillarFeatureNet( 25 | feat_channels=[64], 26 | voxel_size=[0.16, 0.16, 4], 27 | point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1] 28 | ) 29 | self.middle_encoder = PointPillarsScatter( 30 | in_channels=64, 31 | output_shape=[496,432] 32 | ) 33 | self.backbone = SECOND( 34 | in_channels=64, 35 | out_channels=[64, 128, 256] 36 | ) 37 | self.neck = SECONDFPN( 38 | in_channels=[64, 128, 256], 39 | out_channels=[128, 128, 128] 40 | ) 41 | self.bbox_head = Anchor3DHead( 42 | num_classes=1, 43 | in_channels=384, 44 | feat_channels=384 45 | ) 46 | 47 | def forward(self, 48 | voxels, 49 | num_points, 50 | coors): 51 | """Forward function without augmentation. Rewritten to remove the 52 | post-processing that is unfriendly to deployment. 53 | 54 | Args: 55 | voxels (torch.Tensor): Point features or raw points in shape (N, M, C). 56 | num_points (torch.Tensor): Number of points in each pillar. 57 | coors (torch.Tensor): Coordinates of each voxel. 58 | 59 | Returns: 60 | tuple: bbox_preds, scores and dir_scores predicted by the anchor head.
61 | """ 62 | x = self.extract_feat(voxels, num_points, coors) 63 | bbox_preds, scores, dir_scores = self.bbox_head(x) 64 | return bbox_preds, scores, dir_scores 65 | 66 | def extract_feat(self, 67 | voxels, 68 | num_points, 69 | coors): 70 | """Extract features from points. Rewrite this func to remove voxelize op. 71 | 72 | Args: 73 | voxels (torch.Tensor): Point features or raw points in shape (N, M, C). 74 | num_points (torch.Tensor): Number of points in each pillar. 75 | coors (torch.Tensor): Coordinates of each voxel. 76 | 77 | Returns: 78 | torch.Tensor: Features from points. 79 | """ 80 | voxel_features = self.voxel_encoder(voxels, num_points, coors) 81 | batch_size = coors[-1, 0] + 1 # refactor 82 | # assert batch_size == 1 83 | x = self.middle_encoder(voxel_features, coors, batch_size) 84 | x = self.backbone(x) 85 | x = self.neck(x) 86 | return x 87 | 88 | -------------------------------------------------------------------------------- /mmdet3d/models/necks/pointnet2_fp_neck.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.runner import BaseModule 3 | from torch import nn as nn 4 | 5 | from mmdet3d.ops import PointFPModule 6 | from ..builder import NECKS 7 | 8 | 9 | @NECKS.register_module() 10 | class PointNetFPNeck(BaseModule): 11 | r"""PointNet FP Module used in PointRCNN. 12 | 13 | Refer to the `official code `_. 14 | 15 | .. code-block:: none 16 | 17 | sa_n ---------------------------------------- 18 | | 19 | ... --------------------------------- | 20 | | | 21 | sa_1 ------------- | | 22 | | | | 23 | sa_0 -> fp_0 -> fp_module ->fp_1 -> ... -> fp_module -> fp_n 24 | 25 | sa_n including sa_xyz (torch.Tensor) and sa_features (torch.Tensor) 26 | fp_n including fp_xyz (torch.Tensor) and fp_features (torch.Tensor) 27 | 28 | Args: 29 | fp_channels (tuple[tuple[int]]): Tuple of mlp channels in FP modules. 30 | init_cfg (dict or list[dict], optional): Initialization config dict. 31 | Default: None 32 | """ 33 | 34 | def __init__(self, fp_channels, init_cfg=None): 35 | super(PointNetFPNeck, self).__init__(init_cfg=init_cfg) 36 | 37 | self.num_fp = len(fp_channels) 38 | self.FP_modules = nn.ModuleList() 39 | for cur_fp_mlps in fp_channels: 40 | self.FP_modules.append(PointFPModule(mlp_channels=cur_fp_mlps)) 41 | 42 | def _extract_input(self, feat_dict): 43 | """Extract inputs from features dictionary. 44 | 45 | Args: 46 | feat_dict (dict): Feature dict from backbone, which may contain 47 | the following keys and values: 48 | 49 | - sa_xyz (list[torch.Tensor]): Points of each sa module 50 | in shape (N, 3). 51 | - sa_features (list[torch.Tensor]): Output features of 52 | each sa module in shape (N, M). 53 | 54 | Returns: 55 | list[torch.Tensor]: Coordinates of multiple levels of points. 56 | list[torch.Tensor]: Features of multiple levels of points. 57 | """ 58 | sa_xyz = feat_dict['sa_xyz'] 59 | sa_features = feat_dict['sa_features'] 60 | assert len(sa_xyz) == len(sa_features) 61 | 62 | return sa_xyz, sa_features 63 | 64 | def forward(self, feat_dict): 65 | """Forward pass. 66 | 67 | Args: 68 | feat_dict (dict): Feature dict from backbone. 69 | 70 | Returns: 71 | dict[str, torch.Tensor]: Outputs of the Neck. 72 | 73 | - fp_xyz (torch.Tensor): The coordinates of fp features. 74 | - fp_features (torch.Tensor): The features from the last 75 | feature propagation layers. 
76 | """ 77 | sa_xyz, sa_features = self._extract_input(feat_dict) 78 | 79 | fp_feature = sa_features[-1] 80 | fp_xyz = sa_xyz[-1] 81 | 82 | for i in range(self.num_fp): 83 | # consume the points in a bottom-up manner 84 | fp_feature = self.FP_modules[i](sa_xyz[-(i + 2)], sa_xyz[-(i + 1)], 85 | sa_features[-(i + 2)], fp_feature) 86 | fp_xyz = sa_xyz[-(i + 2)] 87 | 88 | ret = dict(fp_xyz=fp_xyz, fp_features=fp_feature) 89 | return ret 90 | -------------------------------------------------------------------------------- /data/data_denoisor/data_denoisor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from .denoisor_dmr_utils import run_denoise_large_pointcloud, run_denoise_middle_pointcloud, run_denoise 5 | from .denoisor_pcpnet_utils import ResPCPNet 6 | 7 | def lidar_denoisor(lidar_data, method): 8 | """ 9 | Use different lidar denoise methods to denoise lidar data. 10 | Args: 11 | lidar_data: dict 12 | method: str 13 | return: 14 | demoised lidar_data 15 | Reference: 16 | https://github.com/luost26/DMRDenoise 17 | https://github.com/mrakotosaon/pointcleannet 18 | """ 19 | points = lidar_data['points'] 20 | 21 | if method == 'pcp': 22 | param_filename = 'deephub/denoisy_model/pcp/pretrained/denoisingModel/PointCleanNet_params.pth' 23 | model_filename = 'deephub/denoisy_model/pcp/pretrained/denoisingModel/PointCleanNet_model.pth' 24 | trainopt = torch.load(param_filename) 25 | pred_dim = 0 26 | output_pred_ind = [] 27 | for o in trainopt.outputs: 28 | if o in ['clean_points']: 29 | output_pred_ind.append(pred_dim) 30 | pred_dim += 3 31 | else: 32 | raise ValueError('Unknown output: %s' % (o)) 33 | 34 | regressor = ResPCPNet( 35 | num_points=trainopt.points_per_patch, 36 | output_dim=pred_dim, 37 | use_point_stn=trainopt.use_point_stn, 38 | use_feat_stn=trainopt.use_feat_stn, 39 | sym_op=trainopt.sym_op, 40 | point_tuple=trainopt.point_tuple) 41 | regressor.load_state_dict(torch.load(model_filename)) 42 | 43 | pred, trans, _, _ = regressor(points) 44 | patch_radiuses=torch.FloatTensor([0.05]) 45 | 46 | denoised = pred 47 | 48 | if method == 'dmr': 49 | num_points = points.shape[0] 50 | if num_points >= 120000: 51 | print('[INFO] Denoising large point cloud.') 52 | denoised, downsampled = run_denoise_large_pointcloud( 53 | pc=points, 54 | cluster_size=30000, 55 | patch_size=1000, 56 | ckpt='deephub/denoisy_model/dmr/pretrained/supervised/epoch=153.ckpt', 57 | device='cuda:0', 58 | random_state=0, 59 | expand_knn=16 60 | ) 61 | elif num_points >= 60000: 62 | print('[INFO] Denoising middle-sized point cloud.') 63 | denoised, downsampled = run_denoise_middle_pointcloud( 64 | pc=points, 65 | num_splits=2, 66 | patch_size=1000, 67 | ckpt='deephub/denoisy_model/dmr/pretrained/supervised/epoch=153.ckpt', 68 | device='cuda:0', 69 | random_state=0, 70 | expand_knn=16 71 | ) 72 | elif num_points >= 10000: 73 | print('[INFO] Denoising regular-sized point cloud.') 74 | denoised, downsampled = run_denoise( 75 | pc=points, 76 | patch_size=1000, 77 | ckpt='deephub/denoisy_model/dmr/pretrained/supervised/epoch=153.ckpt', 78 | device='cuda:0', 79 | random_state=0, 80 | expand_knn=16 81 | ) 82 | else: 83 | assert False, "Our pretrained model does not support point clouds with less than 10K points." 
84 | 85 | lidar_data['points'] = denoised 86 | 87 | return lidar_data 88 | -------------------------------------------------------------------------------- /model/model_deployor/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Deployor 4 | 5 | We support deploying the point cloud detection models PointPillars and CenterPoint to the ONNXRuntime and TensorRT backends. 6 | 7 | The deployment process is as follows: 8 | 9 | **1. Generate the inputs needed by the model based on the model's config (including reading the point cloud file, running the data pipeline and voxelization).** 10 | 11 | ```python 12 | def create_input(pcd, dataset, model, device): 13 | """Create input for detector. 14 | 15 | Args: 16 | pcd (str): Input pcd file path. 17 | 18 | Returns: 19 | tuple: (data, input), meta information for the input pcd 20 | and model input. 21 | """ 22 | data = read_pcd_file(pcd, test_pipelines[model][dataset], device, box_type_3d='LiDAR') 23 | voxels, num_points, coors = voxelize( 24 | voxel_layers[model][dataset], data['points'][0]) 25 | return data, (voxels, num_points, coors) 26 | ``` 27 | 28 | **2. Rewrite the forward function of the network model to remove the branches and operations that are not suitable for deployment.** 29 | 30 | **3. Convert the PyTorch model to the intermediate ONNX representation.** 31 | 32 | ```python 33 | torch.onnx.export( 34 | model, 35 | model_inputs, 36 | output_file, 37 | export_params=True, 38 | input_names=input_names, 39 | output_names=output_names, 40 | opset_version=11, 41 | dynamic_axes=dynamic_axes, 42 | keep_initializers_as_inputs=False, 43 | verbose=verbose) 44 | ``` 45 | 46 | **4. Convert the intermediate ONNX representation to a TensorRT engine if needed.** 47 | 48 | ```python 49 | engine = create_trt_engine( 50 | output_file, 51 | input_shapes=trt_input_shapes[dataset], 52 | fp16_mode=fp16, 53 | int8_mode=False, 54 | int8_param={}, 55 | max_workspace_size=1073741824, 56 | device_id=0) 57 | ``` 58 | 59 | 60 | 61 | ## Usage 62 | 63 | First install onnx and onnxruntime: 64 | 65 | ```shell 66 | pip install onnx 67 | pip install onnxruntime==1.8.1 68 | ``` 69 | 70 | If you want to use the TensorRT backend, install TensorRT as follows: 71 | 72 | 1. Download TensorRT from [NVIDIA Developer Program Membership Required | NVIDIA Developer](https://developer.nvidia.com/nvidia-tensorrt-download) 73 | 74 | 2. Install the TensorRT Python package: 75 | 76 | ```shell 77 | cd /the/path/of/tensorrt/tar/gz/file 78 | tar -zxvf TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz 79 | pip install TensorRT-8.2.3.0/python/tensorrt-8.2.3.0-cp37-none-linux_x86_64.whl 80 | ``` 81 | 82 | 3. Add the TensorRT libraries to the environment variables: 83 | 84 | ```shell 85 | vim .bashrc 86 | export LD_LIBRARY_PATH=../TensorRT-8.2.3.0/lib:$LD_LIBRARY_PATH 87 | export LIBRARY_PATH=../TensorRT-8.2.3.0/lib:$LIBRARY_PATH 88 | source .bashrc 89 | ``` 90 | 91 | 92 | 93 | 94 | 95 | You can then run the following command to convert the model for the ONNXRuntime backend: 96 | 97 | ```shell 98 | python tools/deploy.py 99 | test/data_tobe_tested/kitti/kitti_000008.bin 100 | checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth 101 | onnxruntime 102 | pointpillars 103 | kitti 104 | pointpillars 105 | ``` 106 | 107 | Or build a TensorRT engine instead: 108 | 109 | ```shell 110 | python tools/deploy.py 111 | test/data_tobe_tested/kitti/kitti_000008.bin 112 | checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth 113 | tensorrt 114 | pointpillars 115 | kitti 116 | pointpillars 117 | ``` 118 | 119 | 120 | 121 |
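After conversion, you can sanity-check the exported ONNX file with onnxruntime. The snippet below is a minimal sketch following the demo scripts in `model/model_compressor`; the point cloud path and the `pointpillars.onnx` output name are examples you may need to adapt:

```python
import onnxruntime
import torch

from model.model_deployor.deployor_utils import create_input

# Build the detector inputs exactly as the deployment step did.
pcd = 'test/data_tobe_tested/kitti/kitti_000008.bin'
data, model_inputs = create_input(pcd, 'kitti', 'pointpillars', 'cpu')

# Run the exported model ('pointpillars.onnx' is the output_file produced above).
ort_session = onnxruntime.InferenceSession('pointpillars.onnx')
input_dict = {
    'voxels': model_inputs[0].cpu().numpy(),
    'num_points': model_inputs[1].cpu().numpy(),
    'coors': model_inputs[2].cpu().numpy(),
}
ort_output = ort_session.run(['scores', 'bbox_preds', 'dir_scores'], input_dict)

outputs = {name: torch.tensor(arr)
           for name, arr in zip(['scores', 'bbox_preds', 'dir_scores'], ort_output)}
print('onnx : inference successful!')
```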
-------------------------------------------------------------------------------- /mmdet3d/core/bbox/coders/delta_xyzwhlr_bbox_coder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmdet.core.bbox import BaseBBoxCoder 5 | from mmdet.core.bbox.builder import BBOX_CODERS 6 | 7 | 8 | @BBOX_CODERS.register_module() 9 | class DeltaXYZWLHRBBoxCoder(BaseBBoxCoder): 10 | """Bbox Coder for 3D boxes. 11 | 12 | Args: 13 | code_size (int): The dimension of boxes to be encoded. 14 | """ 15 | 16 | def __init__(self, code_size=7): 17 | super(DeltaXYZWLHRBBoxCoder, self).__init__() 18 | self.code_size = code_size 19 | 20 | @staticmethod 21 | def encode(src_boxes, dst_boxes): 22 | """Get box regression transformation deltas (dx, dy, dz, dx_size, 23 | dy_size, dz_size, dr, dv*) that can be used to transform the 24 | `src_boxes` into the `target_boxes`. 25 | 26 | Args: 27 | src_boxes (torch.Tensor): source boxes, e.g., object proposals. 28 | dst_boxes (torch.Tensor): target of the transformation, e.g., 29 | ground-truth boxes. 30 | 31 | Returns: 32 | torch.Tensor: Box transformation deltas. 33 | """ 34 | box_ndim = src_boxes.shape[-1] 35 | cas, cgs, cts = [], [], [] 36 | if box_ndim > 7: 37 | xa, ya, za, wa, la, ha, ra, *cas = torch.split( 38 | src_boxes, 1, dim=-1) 39 | xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split( 40 | dst_boxes, 1, dim=-1) 41 | cts = [g - a for g, a in zip(cgs, cas)] 42 | else: 43 | xa, ya, za, wa, la, ha, ra = torch.split(src_boxes, 1, dim=-1) 44 | xg, yg, zg, wg, lg, hg, rg = torch.split(dst_boxes, 1, dim=-1) 45 | za = za + ha / 2 46 | zg = zg + hg / 2 47 | diagonal = torch.sqrt(la**2 + wa**2) 48 | xt = (xg - xa) / diagonal 49 | yt = (yg - ya) / diagonal 50 | zt = (zg - za) / ha 51 | lt = torch.log(lg / la) 52 | wt = torch.log(wg / wa) 53 | ht = torch.log(hg / ha) 54 | rt = rg - ra 55 | return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1) 56 | 57 | @staticmethod 58 | def decode(anchors, deltas): 59 | """Apply transformation `deltas` (dx, dy, dz, dx_size, dy_size, 60 | dz_size, dr, dv*) to `boxes`. 61 | 62 | Args: 63 | anchors (torch.Tensor): Parameters of anchors with shape (N, 7). 64 | deltas (torch.Tensor): Encoded boxes with shape 65 | (N, 7+n) [x, y, z, x_size, y_size, z_size, r, velo*]. 66 | 67 | Returns: 68 | torch.Tensor: Decoded boxes. 69 | """ 70 | cas, cts = [], [] 71 | box_ndim = anchors.shape[-1] 72 | if box_ndim > 7: 73 | xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) 74 | xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(deltas, 1, dim=-1) 75 | else: 76 | xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1) 77 | xt, yt, zt, wt, lt, ht, rt = torch.split(deltas, 1, dim=-1) 78 | 79 | za = za + ha / 2 80 | diagonal = torch.sqrt(la**2 + wa**2) 81 | xg = xt * diagonal + xa 82 | yg = yt * diagonal + ya 83 | zg = zt * ha + za 84 | 85 | lg = torch.exp(lt) * la 86 | wg = torch.exp(wt) * wa 87 | hg = torch.exp(ht) * ha 88 | rg = rt + ra 89 | zg = zg - hg / 2 90 | cgs = [t + a for t, a in zip(cts, cas)] 91 | return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1) 92 | --------------------------------------------------------------------------------
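As a quick illustration of the delta encoding implemented by `DeltaXYZWLHRBBoxCoder` above, the following hypothetical round-trip check (not part of the repository) shows that `decode(anchors, encode(anchors, gt))` recovers the ground-truth boxes:

```python
import torch

from mmdet3d.core.bbox.coders.delta_xyzwhlr_bbox_coder import DeltaXYZWLHRBBoxCoder

coder = DeltaXYZWLHRBBoxCoder(code_size=7)

# Example anchors and ground-truth boxes in (x, y, z, w, l, h, r) format.
anchors = torch.tensor([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
gt_boxes = torch.tensor([[0.5, -0.3, -0.9, 1.7, 4.1, 1.5, 0.1]])

# encode() produces the regression targets, decode() inverts them.
deltas = coder.encode(anchors, gt_boxes)
decoded = coder.decode(anchors, deltas)
assert torch.allclose(decoded, gt_boxes, atol=1e-5)
```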