├── .gitignore
├── .vscode
│   └── settings.json
├── LICENSE
├── README.md
├── data
│   ├── kitti
│   │   └── ImageSets
│   │       ├── test.txt
│   │       ├── train.txt
│   │       └── val.txt
│   └── waymo
│       └── ImageSets
│           ├── train.txt
│           └── val.txt
├── docs
│   ├── DEMO.md
│   ├── GETTING_STARTED.md
│   ├── INSTALL.md
│   ├── dataset_vs_model.png
│   ├── demo.png
│   ├── model_framework.png
│   ├── multiple_models_demo.png
│   └── open_mmlab.png
├── pcdet
│   ├── __init__.py
│   ├── config.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── augmentor
│   │   │   ├── augmentor_utils.py
│   │   │   ├── data_augmentor.py
│   │   │   └── database_sampler.py
│   │   ├── dataset.py
│   │   ├── kitti
│   │   │   ├── kitti_dataset.py
│   │   │   ├── kitti_object_eval_python
│   │   │   │   ├── LICENSE
│   │   │   │   ├── README.md
│   │   │   │   ├── eval.py
│   │   │   │   ├── evaluate.py
│   │   │   │   ├── kitti_common.py
│   │   │   │   └── rotate_iou.py
│   │   │   └── kitti_utils.py
│   │   ├── nuscenes
│   │   │   ├── nuscenes_dataset.py
│   │   │   └── nuscenes_utils.py
│   │   ├── processor
│   │   │   ├── data_processor.py
│   │   │   └── point_feature_encoder.py
│   │   └── waymo
│   │       ├── waymo_dataset.py
│   │       ├── waymo_eval.py
│   │       └── waymo_utils.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── backbones_2d
│   │   │   ├── __init__.py
│   │   │   ├── base_bev_backbone.py
│   │   │   └── map_to_bev
│   │   │       ├── __init__.py
│   │   │       ├── conv2d_collapse.py
│   │   │       ├── height_compression.py
│   │   │       └── pointpillar_scatter.py
│   │   ├── backbones_3d
│   │   │   ├── __init__.py
│   │   │   ├── pfe
│   │   │   │   ├── __init__.py
│   │   │   │   └── voxel_set_abstraction.py
│   │   │   ├── pointnet2_backbone.py
│   │   │   ├── spconv_backbone.py
│   │   │   ├── spconv_unet.py
│   │   │   └── vfe
│   │   │       ├── __init__.py
│   │   │       ├── image_vfe.py
│   │   │       ├── image_vfe_modules
│   │   │       │   ├── f2v
│   │   │       │   │   ├── __init__.py
│   │   │       │   │   ├── frustum_grid_generator.py
│   │   │       │   │   ├── frustum_to_voxel.py
│   │   │       │   │   └── sampler.py
│   │   │       │   └── ffn
│   │   │       │       ├── __init__.py
│   │   │       │       ├── ddn
│   │   │       │       │   ├── __init__.py
│   │   │       │       │   ├── ddn_deeplabv3.py
│   │   │       │       │   └── ddn_template.py
│   │   │       │       ├── ddn_loss
│   │   │       │       │   ├── __init__.py
│   │   │       │       │   ├── balancer.py
│   │   │       │       │   └── ddn_loss.py
│   │   │       │       └── depth_ffn.py
│   │   │       ├── mean_vfe.py
│   │   │       ├── pillar_vfe.py
│   │   │       └── vfe_template.py
│   │   ├── dense_heads
│   │   │   ├── __init__.py
│   │   │   ├── anchor_head_multi.py
│   │   │   ├── anchor_head_single.py
│   │   │   ├── anchor_head_template.py
│   │   │   ├── point_head_box.py
│   │   │   ├── point_head_simple.py
│   │   │   ├── point_head_template.py
│   │   │   ├── point_intra_part_head.py
│   │   │   └── target_assigner
│   │   │       ├── anchor_generator.py
│   │   │       ├── atss_target_assigner.py
│   │   │       └── axis_aligned_target_assigner.py
│   │   ├── detectors
│   │   │   ├── PartA2_net.py
│   │   │   ├── __init__.py
│   │   │   ├── caddn.py
│   │   │   ├── detector3d_template.py
│   │   │   ├── point_rcnn.py
│   │   │   ├── pointpillar.py
│   │   │   ├── pv_rcnn.py
│   │   │   ├── second_net.py
│   │   │   ├── second_net_iou.py
│   │   │   └── voxel_rcnn.py
│   │   ├── model_utils
│   │   │   ├── basic_block_2d.py
│   │   │   └── model_nms_utils.py
│   │   └── roi_heads
│   │       ├── __init__.py
│   │       ├── partA2_head.py
│   │       ├── pointrcnn_head.py
│   │       ├── pvrcnn_head.py
│   │       ├── roi_head_template.py
│   │       ├── second_head.py
│   │       ├── target_assigner
│   │       │   └── proposal_target_layer.py
│   │       └── voxelrcnn_head.py
│   ├── ops
│   │   ├── iou3d_nms
│   │   │   ├── iou3d_nms_utils.py
│   │   │   └── src
│   │   │       ├── iou3d_cpu.cpp
│   │   │       ├── iou3d_cpu.h
│   │   │       ├── iou3d_nms.cpp
│   │   │       ├── iou3d_nms.h
│   │   │       ├── iou3d_nms_api.cpp
│   │   │       └── iou3d_nms_kernel.cu
│   │   ├── pointnet2
│   │   │   ├── pointnet2_batch
│   │   │   │   ├── pointnet2_modules.py
│   │   │   │   ├── pointnet2_utils.py
│   │   │   │   └── src
│   │   │   │       ├── ball_query.cpp
│   │   │   │       ├── ball_query_gpu.cu
│   │   │   │       ├── ball_query_gpu.h
│   │   │   │       ├── cuda_utils.h
│   │   │   │       ├── group_points.cpp
│   │   │   │       ├── group_points_gpu.cu
│   │   │   │       ├── group_points_gpu.h
│   │   │   │       ├── interpolate.cpp
│   │   │   │       ├── interpolate_gpu.cu
│   │   │   │       ├── interpolate_gpu.h
│   │   │   │       ├── pointnet2_api.cpp
│   │   │   │       ├── sampling.cpp
│   │   │   │       ├── sampling_gpu.cu
│   │   │   │       └── sampling_gpu.h
│   │   │   └── pointnet2_stack
│   │   │       ├── pointnet2_modules.py
│   │   │       ├── pointnet2_utils.py
│   │   │       ├── src
│   │   │       │   ├── ball_query.cpp
│   │   │       │   ├── ball_query_gpu.cu
│   │   │       │   ├── ball_query_gpu.h
│   │   │       │   ├── cuda_utils.h
│   │   │       │   ├── group_points.cpp
│   │   │       │   ├── group_points_gpu.cu
│   │   │       │   ├── group_points_gpu.h
│   │   │       │   ├── interpolate.cpp
│   │   │       │   ├── interpolate_gpu.cu
│   │   │       │   ├── interpolate_gpu.h
│   │   │       │   ├── pointnet2_api.cpp
│   │   │       │   ├── sampling.cpp
│   │   │       │   ├── sampling_gpu.cu
│   │   │       │   ├── sampling_gpu.h
│   │   │       │   ├── voxel_query.cpp
│   │   │       │   ├── voxel_query_gpu.cu
│   │   │       │   └── voxel_query_gpu.h
│   │   │       ├── voxel_pool_modules.py
│   │   │       └── voxel_query_utils.py
│   │   ├── roiaware_pool3d
│   │   │   ├── roiaware_pool3d_utils.py
│   │   │   └── src
│   │   │       ├── roiaware_pool3d.cpp
│   │   │       └── roiaware_pool3d_kernel.cu
│   │   └── roipoint_pool3d
│   │       ├── roipoint_pool3d_utils.py
│   │       └── src
│   │           ├── roipoint_pool3d.cpp
│   │           └── roipoint_pool3d_kernel.cu
│   └── utils
│       ├── box_coder_utils.py
│       ├── box_utils.py
│       ├── calibration_kitti.py
│       ├── common_utils.py
│       ├── loss_utils.py
│       ├── object3d_kitti.py
│       └── transform_utils.py
├── requirements.txt
├── setup.py
└── tools
    ├── cfgs
    │   ├── dataset_configs
    │   │   ├── kitti_dataset.yaml
    │   │   ├── nuscenes_dataset.yaml
    │   │   └── waymo_dataset.yaml
    │   ├── kitti_models
    │   │   ├── PartA2.yaml
    │   │   ├── PartA2_free.yaml
    │   │   ├── pointpillar.yaml
    │   │   ├── pointrcnn.yaml
    │   │   ├── pointrcnn_iou.yaml
    │   │   ├── pv_rcnn.yaml
    │   │   ├── second.yaml
    │   │   ├── second_multihead.yaml
    │   │   └── voxel_rcnn_car.yaml
    │   ├── nuscenes_models
    │   │   ├── cbgs_pp_multihead.yaml
    │   │   └── cbgs_second_multihead.yaml
    │   └── waymo_models
    │       ├── PartA2.yaml
    │       ├── pv_rcnn.yaml
    │       └── second.yaml
    ├── demo.py
    ├── eval_utils
    │   └── eval_utils.py
    ├── test.py
    ├── train.py
    ├── train_utils
    │   ├── optimization
    │   │   ├── __init__.py
    │   │   ├── fastai_optim.py
    │   │   └── learning_schedules_fastai.py
    │   └── train_utils.py
    └── visual_utils
        └── visualize_utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | **__pycache__**
2 | **build**
3 | **egg-info**
4 | **dist**
5 | *.pyc
6 | venv/
7 | *.idea/
8 | *.so
9 | *.sh
10 | *.pth
11 | *.pkl
12 | *.zip
13 | *.bin
14 | output
15 | version.py
16 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "array": "cpp",
4 | "string": "cpp",
5 | "string_view": "cpp",
6 | "vector": "cpp"
7 | }
8 | }
--------------------------------------------------------------------------------
/docs/DEMO.md:
--------------------------------------------------------------------------------
1 | # Quick Demo
2 |
3 | Here we provide a quick demo to test a pretrained model on custom point cloud data and visualize the predicted results.
4 |
5 | We assume you have already followed [INSTALL.md](INSTALL.md) to install the `OpenPCDet` repo successfully.
6 |
7 | 1. Download the provided pretrained models as shown in the [README.md](../README.md).
8 |
9 | 2. Make sure you have already installed the `mayavi` visualization tool. If not, you can install it as follows:
10 | ```
11 | pip install mayavi
12 | ```
13 |
14 | 3. Prepare your custom point cloud data (skip this step if you use the original KITTI data).
15 | * You need to transform the coordinates of your custom point cloud to
16 | the unified normative coordinate system of `OpenPCDet`, that is, the x-axis points towards the front direction,
17 | the y-axis points towards the left direction, and the z-axis points towards the top direction (see the sketch appended after this file).
18 | * (Optional) The z-axis origin of your point cloud coordinate system should be about 1.6 m above the ground surface,
19 | since the provided models are currently trained on the KITTI dataset.
20 | * Set the intensity information, and save your transformed custom data to `numpy file`:
21 | ```python
22 | # Transform your point cloud data
23 | ...
24 |
25 | # Save it to the file.
26 | # The shape of points should be (num_points, 4), that is [x, y, z, intensity] (only for the KITTI dataset).
27 | # If you don't have intensity information, just set it to zeros.
28 | # If you do have intensity information, you should normalize it to [0, 1].
29 | points[:, 3] = 0
30 | np.save('my_data.npy', points)
31 | ```
32 |
33 | 4. Run the demo with a pretrained model (e.g. PV-RCNN) and your custom point cloud data as follows:
34 | ```shell
35 | python demo.py --cfg_file cfgs/kitti_models/pv_rcnn.yaml \
36 | --ckpt pv_rcnn_8369.pth \
37 | --data_path ${POINT_CLOUD_DATA}
38 | ```
39 | Here `${POINT_CLOUD_DATA}` could be any of the following formats:
40 | * A single numpy file with your transformed custom data, like `my_data.npy`.
41 | * A directory of your transformed custom data, for testing multiple point clouds.
42 | * The original KITTI `.bin` data within `data/kitti`, like `data/kitti/training/velodyne/000008.bin`.
43 |
44 | Then you can see the predicted results on the visualized point cloud as follows:
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
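The coordinate transform in step 3 above is left as `...`. A minimal sketch of what it could look like, assuming a hypothetical sensor with a 90° yaw offset and a 1.2 m mounting height (both example values, not part of `OpenPCDet`; substitute your sensor's actual extrinsics):

```python
import numpy as np

# Hypothetical calibration values -- replace with your sensor's extrinsics.
yaw = np.deg2rad(90.0)      # rotation about z needed so the x-axis points forward
sensor_height = 1.2         # sensor height above the ground (m)

points = np.fromfile('raw_cloud.bin', dtype=np.float32).reshape(-1, 4)

# Rotate about z so that x points to the front and y points to the left.
rot = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                [np.sin(yaw),  np.cos(yaw), 0.0],
                [0.0,          0.0,         1.0]], dtype=np.float32)
points[:, :3] = points[:, :3] @ rot.T

# Shift z so the origin sits ~1.6 m above the ground (ground plane at z = -1.6),
# matching the KITTI-trained models.
points[:, 2] += sensor_height - 1.6

points[:, 3] = 0            # no intensity information available
np.save('my_data.npy', points)
```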
/docs/INSTALL.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ### Requirements
4 | All the code is tested in the following environment:
5 | * Linux (tested on Ubuntu 14.04/16.04)
6 | * Python 3.6+
7 | * PyTorch 1.1 or higher (tested on PyTorch 1.1, 1.3, 1.5)
8 | * CUDA 9.0 or higher (PyTorch 1.3+ needs CUDA 9.2+)
9 | * [`spconv v1.0 (commit 8da6f96)`](https://github.com/traveller59/spconv/tree/8da6f967fb9a054d8870c3515b1b44eca2103634) or [`spconv v1.2`](https://github.com/traveller59/spconv)
10 |
11 |
12 | ### Install `pcdet v0.3`
13 | NOTE: Please re-install `pcdet v0.3` by running `python setup.py develop`, even if you have already installed a previous version.
14 |
15 | a. Clone this repository.
16 | ```shell
17 | git clone https://github.com/open-mmlab/OpenPCDet.git
18 | ```
19 |
20 | b. Install the dependent libraries as follows:
21 |
22 | * Install the dependent python libraries:
23 | ```
24 | pip install -r requirements.txt
25 | ```
26 |
27 | * Install the SparseConv library; we use the implementation from [`spconv`](https://github.com/traveller59/spconv).
28 |   * If you use PyTorch 1.1, then make sure you install `spconv v1.0` ([commit 8da6f96](https://github.com/traveller59/spconv/tree/8da6f967fb9a054d8870c3515b1b44eca2103634)) instead of the latest one.
29 |   * If you use PyTorch 1.3+, then you need to install `spconv v1.2`. As mentioned by the author of [`spconv`](https://github.com/traveller59/spconv), you need to use their docker image if you use PyTorch 1.4+.
30 |
31 | c. Install this `pcdet` library by running the following command:
32 | ```shell
33 | python setup.py develop
34 | ```
35 |
--------------------------------------------------------------------------------
/docs/dataset_vs_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjw-DL/OpenPCDet-Noted/f33241752b1659c53ef98fd621bcd0ed0e1af971/docs/dataset_vs_model.png
--------------------------------------------------------------------------------
/docs/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjw-DL/OpenPCDet-Noted/f33241752b1659c53ef98fd621bcd0ed0e1af971/docs/demo.png
--------------------------------------------------------------------------------
/docs/model_framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjw-DL/OpenPCDet-Noted/f33241752b1659c53ef98fd621bcd0ed0e1af971/docs/model_framework.png
--------------------------------------------------------------------------------
/docs/multiple_models_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjw-DL/OpenPCDet-Noted/f33241752b1659c53ef98fd621bcd0ed0e1af971/docs/multiple_models_demo.png
--------------------------------------------------------------------------------
/docs/open_mmlab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjw-DL/OpenPCDet-Noted/f33241752b1659c53ef98fd621bcd0ed0e1af971/docs/open_mmlab.png
--------------------------------------------------------------------------------
/pcdet/__init__.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from pathlib import Path
3 |
4 | from .version import __version__
5 |
6 | __all__ = [
7 | '__version__'
8 | ]
9 |
10 |
11 | def get_git_commit_number():
12 | if not (Path(__file__).parent / '../.git').exists():
13 | return '0000000'
14 |
15 | cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
16 | git_commit_number = cmd_out.stdout.decode('utf-8')[:7]
17 | return git_commit_number
18 |
19 |
20 | script_version = get_git_commit_number()
21 |
22 |
23 | if script_version not in __version__:
24 | __version__ = __version__ + '+py%s' % script_version
25 |
--------------------------------------------------------------------------------
/pcdet/config.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import yaml
4 | from easydict import EasyDict
5 |
6 |
7 | def log_config_to_file(cfg, pre='cfg', logger=None):
8 | # Iterate over the cfg dict and write it to the log file; called from train and test
9 | for key, val in cfg.items():
10 | if isinstance(cfg[key], EasyDict):
11 | logger.info('\n%s.%s = edict()' % (pre, key))
12 | # The prefix (pre) keeps growing during recursion
13 | log_config_to_file(cfg[key], pre=pre + '.' + key, logger=logger)
14 | continue
15 | logger.info('%s.%s: %s' % (pre, key, val))
16 |
17 |
18 | def cfg_from_list(cfg_list, config):
19 | """Set config keys via list (e.g., from command line)."""
20 | from ast import literal_eval
21 | assert len(cfg_list) % 2 == 0
22 | for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
23 | key_list = k.split('.')
24 | d = config
25 | for subkey in key_list[:-1]:
26 | assert subkey in d, 'NotFoundKey: %s' % subkey
27 | d = d[subkey]
28 | subkey = key_list[-1]
29 | assert subkey in d, 'NotFoundKey: %s' % subkey
30 | try:
31 | value = literal_eval(v)
32 | except:
33 | value = v
34 |
35 | if type(value) != type(d[subkey]) and isinstance(d[subkey], EasyDict):
36 | key_val_list = value.split(',')
37 | for src in key_val_list:
38 | cur_key, cur_val = src.split(':')
39 | val_type = type(d[subkey][cur_key])
40 | cur_val = val_type(cur_val)
41 | d[subkey][cur_key] = cur_val
42 | elif type(value) != type(d[subkey]) and isinstance(d[subkey], list):
43 | val_list = value.split(',')
44 | for k, x in enumerate(val_list):
45 | val_list[k] = type(d[subkey][0])(x)
46 | d[subkey] = val_list
47 | else:
48 | assert type(value) == type(d[subkey]), \
49 | 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
50 | d[subkey] = value
51 |
52 |
53 | def merge_new_config(config, new_config):
54 | # If the config inherits a base config file, load the base config first
55 | if '_BASE_CONFIG_' in new_config:
56 | with open(new_config['_BASE_CONFIG_'], 'r') as f:
57 | try:
58 | yaml_config = yaml.load(f, Loader=yaml.FullLoader)
59 | except:
60 | yaml_config = yaml.load(f)
61 | # Merge the base config into the current config
62 | config.update(EasyDict(yaml_config))
63 |
64 | for key, val in new_config.items():
65 | # After this step, keys with the same name as in _BASE_CONFIG_ are overwritten
66 | if not isinstance(val, dict):
67 | config[key] = val
68 | continue
69 | if key not in config:
70 | config[key] = EasyDict()
71 | # During recursion, only the first call can hit _BASE_CONFIG_
72 | merge_new_config(config[key], val)
73 | # Return a dict containing all configs, merged without duplicates
74 | return config
75 |
76 |
77 | def cfg_from_yaml_file(cfg_file, config):
78 | # Load the config file into a dict
79 | with open(cfg_file, 'r') as f:
80 | try:
81 | new_config = yaml.load(f, Loader=yaml.FullLoader)
82 | except:
83 | new_config = yaml.load(f)
84 | # Merge the file config into the existing config
85 | merge_new_config(config=config, new_config=new_config)
86 |
87 | return config
88 |
89 | # EasyDict lets you access dict entries like attributes
90 | cfg = EasyDict()
91 | cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()
92 | cfg.LOCAL_RANK = 0
93 |
--------------------------------------------------------------------------------
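A short usage sketch for the config utilities above. The YAML path is an example from `tools/cfgs` and assumes you run from the `tools` directory; the key override mirrors what the training scripts' `--set` option does:

```python
from pcdet.config import cfg, cfg_from_yaml_file, cfg_from_list

# Load a model config; its _BASE_CONFIG_ (the dataset config) is merged in first.
cfg_from_yaml_file('cfgs/kitti_models/pv_rcnn.yaml', cfg)

# Override individual keys from "command-line" pairs of (dotted.key, value);
# values are parsed with ast.literal_eval.
cfg_from_list(['OPTIMIZATION.BATCH_SIZE_PER_GPU', '2'], cfg)

print(cfg.MODEL.NAME)  # attribute-style access, courtesy of EasyDict
```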
/pcdet/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data import DataLoader
3 | from torch.utils.data import DistributedSampler as _DistributedSampler
4 |
5 | from pcdet.utils import common_utils
6 |
7 | from .dataset import DatasetTemplate
8 | from .kitti.kitti_dataset import KittiDataset
9 | from .nuscenes.nuscenes_dataset import NuScenesDataset
10 | from .waymo.waymo_dataset import WaymoDataset
11 |
12 | __all__ = {
13 | 'DatasetTemplate': DatasetTemplate,
14 | 'KittiDataset': KittiDataset,
15 | 'NuScenesDataset': NuScenesDataset,
16 | 'WaymoDataset': WaymoDataset
17 | }
18 |
19 |
20 | class DistributedSampler(_DistributedSampler):
21 |
22 | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
23 | super().__init__(dataset, num_replicas=num_replicas, rank=rank)
24 | self.shuffle = shuffle
25 |
26 | def __iter__(self):
27 | if self.shuffle:
28 | g = torch.Generator() # manually create a random number generator
29 | g.manual_seed(self.epoch) # seed it with the current epoch
30 | indices = torch.randperm(len(self.dataset), generator=g).tolist() # random permutation of 0..n-1
31 | else:
32 | indices = torch.arange(len(self.dataset)).tolist() # sequential indices when not shuffling
33 |
34 | indices += indices[:(self.total_size - len(indices))] # repeat the head of indices if total_size is longer
35 | assert len(indices) == self.total_size # make sure indices is exactly total_size long
36 |
37 | indices = indices[self.rank:self.total_size:self.num_replicas] # strided sampling across ranks
38 | assert len(indices) == self.num_samples # make sure this rank got exactly num_samples indices
39 |
40 | return iter(indices) # return an iterator; elements come out one by one via __next__
41 |
42 |
43 | def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
44 | logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
45 | """
46 | 构建数据集并调用DataLoader进行加载
47 | Args:
48 | dataset_cfg: 数据集配置文件
49 | class_names: 类比名称
50 | batch_size: batch的大小
51 | dist: 是否并行训练
52 | root_path: 根目录
53 | workers: 线程数
54 | logger: 日志记录器
55 | training: 训练模式
56 | merge_all_iters_to_one_epoch: 是否将所有迭代次数合并到一个epoch
57 | total_epochs: 总epoch数
58 | Returns
59 | dataset: 数据集
60 | dataloader: 数据加载器
61 | sampler: 数据采样器
62 | """
63 | # Instantiate the dataset by its name; only __init__ runs here
64 | dataset = __all__[dataset_cfg.DATASET](
65 | dataset_cfg=dataset_cfg,
66 | class_names=class_names,
67 | root_path=root_path,
68 | training=training,
69 | logger=logger,
70 | )
71 |
72 | if merge_all_iters_to_one_epoch:
73 | assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
74 | dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
75 |
76 | if dist:
77 | if training:
78 | sampler = torch.utils.data.distributed.DistributedSampler(dataset)
79 | else:
80 | rank, world_size = common_utils.get_dist_info() # get rank and world_size
81 | sampler = DistributedSampler(dataset, world_size, rank, shuffle=False) # initialize the distributed sampler
82 | else:
83 | sampler = None
84 | # Initialize the DataLoader; no data is sampled or loaded here -- __getitem__ is only
85 | # called batch by batch during training (single-GPU loading also goes through DataLoader)
86 | dataloader = DataLoader(
87 | dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
88 | shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
89 | drop_last=False, sampler=sampler, timeout=0
90 | )
91 |
92 | return dataset, dataloader, sampler
93 |
--------------------------------------------------------------------------------
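To make the strided sampling in `DistributedSampler.__iter__` above concrete, a tiny pure-Python sketch with toy sizes (no torch needed):

```python
# Each of num_replicas ranks takes every num_replicas-th index, offset by its rank.
dataset_len, num_replicas = 10, 4
num_samples = -(-dataset_len // num_replicas)    # ceil division -> 3
total_size = num_samples * num_replicas          # 12

indices = list(range(dataset_len))
indices += indices[:total_size - len(indices)]   # pad: [0..9, 0, 1]

for rank in range(num_replicas):
    print(rank, indices[rank:total_size:num_replicas])
# 0 [0, 4, 8]
# 1 [1, 5, 9]
# 2 [2, 6, 0]
# 3 [3, 7, 1]
```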
/pcdet/datasets/augmentor/augmentor_utils.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy as np
3 |
4 | from ...utils import common_utils
5 |
6 |
7 | def random_flip_along_x(gt_boxes, points):
8 | """
9 | Random flip along the x axis
10 | Args:
11 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
12 | points: (M, 3 + C)
13 | Returns:
14 | """
15 | # Randomly decide whether to flip
16 | enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
17 | if enable:
18 | gt_boxes[:, 1] = -gt_boxes[:, 1] # flip the y coordinates
19 | gt_boxes[:, 6] = -gt_boxes[:, 6] # negate the heading, defined as the angle to the x axis (measured clockwise here)
20 | points[:, 1] = -points[:, 1] # flip the point y coordinates
21 |
22 | if gt_boxes.shape[1] > 7:
23 | gt_boxes[:, 8] = -gt_boxes[:, 8] # if velocities are present, flip the y velocity
24 |
25 | return gt_boxes, points
26 |
27 |
28 | def random_flip_along_y(gt_boxes, points):
29 | """
30 | Random flip along the y axis
31 | Args:
32 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
33 | points: (M, 3 + C)
34 | Returns:
35 | """
36 | # Randomly decide whether to flip
37 | enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
38 | if enable:
39 | gt_boxes[:, 0] = -gt_boxes[:, 0] # flip the x coordinates
40 | gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi) # add pi to the heading, then negate (angle measured clockwise here)
41 | points[:, 0] = -points[:, 0] # negate the point x coordinates
42 |
43 | if gt_boxes.shape[1] > 7:
44 | gt_boxes[:, 7] = -gt_boxes[:, 7] # if velocities are present, negate the x velocity
45 |
46 |
47 | return gt_boxes, points
48 |
49 |
50 | def global_rotation(gt_boxes, points, rot_range):
51 | """
52 | Rotate the point cloud and the boxes as a whole
53 | Args:
54 | gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
55 | points: (M, 3 + C),
56 | rot_range: [min, max]
57 | Returns:
58 | """
59 | # Sample a rotation angle uniformly from rot_range
60 | noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
61 | # Rotate noise_rotation radians about z; index [0] because rotate_points_along_z operates on batches, and we pass a single cloud
62 | points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
63 | # Rotate the box centers the same way
64 | gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
65 | # Add the rotation to the box headings
66 | gt_boxes[:, 6] += noise_rotation
67 | # Rotate the velocities: they only have x and y components, so pad a third dimension and a batch dimension before rotating
68 | if gt_boxes.shape[1] > 7:
69 | gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
70 | np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
71 | np.array([noise_rotation])
72 | )[0][:, 0:2]
73 |
74 | return gt_boxes, points
75 |
76 |
77 | def global_scaling(gt_boxes, points, scale_range):
78 | """
79 | Random global scaling
80 | Args:
81 | gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
82 | points: (M, 3 + C),
83 | scale_range: [min, max]
84 | Returns:
85 | """
86 | # If the scale range is negligibly small, return the boxes and points unchanged
87 | if scale_range[1] - scale_range[0] < 1e-3:
88 | return gt_boxes, points
89 | # Sample a scale factor uniformly from the scale range
90 | noise_scale = np.random.uniform(scale_range[0], scale_range[1])
91 | # Multiply both the points and the boxes by the scale factor
92 | points[:, :3] *= noise_scale
93 | gt_boxes[:, :6] *= noise_scale
94 | return gt_boxes, points
95 |
96 |
97 | def random_image_flip_horizontal(image, depth_map, gt_boxes, calib):
98 | """
99 | Performs random horizontal flip augmentation
100 | Args:
101 | image: (H_image, W_image, 3), Image
102 | depth_map: (H_depth, W_depth), Depth map
103 | gt_boxes: (N, 7), 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry]
104 | calib: calibration.Calibration, Calibration object
105 | Returns:
106 | aug_image: (H_image, W_image, 3), Augmented image
107 | aug_depth_map: (H_depth, W_depth), Augmented depth map
108 | aug_gt_boxes: (N, 7), Augmented 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry]
109 | """
110 | # Randomly augment with 50% chance
111 | enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
112 |
113 | if enable:
114 | # Flip images
115 | aug_image = np.fliplr(image)
116 | aug_depth_map = np.fliplr(depth_map)
117 |
118 | # Flip 3D gt_boxes by flipping the centroids in image space
119 | aug_gt_boxes = copy.copy(gt_boxes)
120 | locations = aug_gt_boxes[:, :3] # box centroids (first 3 dims)
121 | img_pts, img_depth = calib.lidar_to_img(locations) # project the centroids into the image
122 | W = image.shape[1] # image width
123 | img_pts[:, 0] = W - img_pts[:, 0] # centroid u coordinates after the flip
124 | pts_rect = calib.img_to_rect(u=img_pts[:, 0], v=img_pts[:, 1], depth_rect=img_depth)
125 | pts_lidar = calib.rect_to_lidar(pts_rect) # convert the centroids back to LiDAR coordinates
126 | aug_gt_boxes[:, :3] = pts_lidar # updated centroids in the LiDAR frame
127 | aug_gt_boxes[:, 6] = -1 * aug_gt_boxes[:, 6] # negate the box headings
128 |
129 | else:
130 | aug_image = image
131 | aug_depth_map = depth_map
132 | aug_gt_boxes = gt_boxes
133 |
134 | return aug_image, aug_depth_map, aug_gt_boxes
--------------------------------------------------------------------------------
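For reference, a simplified stand-in for the `common_utils.rotate_points_along_z` helper used above; the real helper operates on batches, while this sketch handles a single cloud:

```python
import numpy as np

def rotate_points_along_z(points, angle):
    """Rotate the (x, y) columns of points counterclockwise by `angle` radians."""
    cosa, sina = np.cos(angle), np.sin(angle)
    rot = np.array([[cosa, -sina], [sina, cosa]], dtype=points.dtype)
    out = points.copy()
    out[:, :2] = points[:, :2] @ rot.T
    return out

pts = np.array([[1.0, 0.0, 0.0]])
print(rotate_points_along_z(pts, np.pi / 2))  # ~ [[0., 1., 0.]] (x-axis maps to y-axis)
```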
/pcdet/datasets/kitti/kitti_object_eval_python/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/pcdet/datasets/kitti/kitti_object_eval_python/README.md:
--------------------------------------------------------------------------------
1 | # kitti-object-eval-python
2 | **Note**: This is borrowed from [traveller59/kitti-object-eval-python](https://github.com/traveller59/kitti-object-eval-python)
3 |
4 | Fast KITTI object detection evaluation in Python (finishes evaluation in less than 10 seconds); supports 2D/BEV/3D/AOS and COCO-style AP. If you use the command line interface, numba needs some time to compile JIT functions.
5 | ## Dependencies
6 | Only Python 3.6+ is supported; requires `numpy`, `skimage`, `numba`, and `fire`. If you have Anaconda, just install `cudatoolkit` in Anaconda. Otherwise, please refer to this [page](https://github.com/numba/numba#custom-python-environments) to set up llvm and cuda for numba.
7 | * Install by conda:
8 | ```
9 | conda install -c numba cudatoolkit=x.x   (8.0, 9.0, or 9.1, depending on your environment)
10 | ```
11 | ## Usage
12 | * commandline interface:
13 | ```
14 | python evaluate.py evaluate --label_path=/path/to/your_gt_label_folder --result_path=/path/to/your_result_folder --label_split_file=/path/to/val.txt --current_class=0 --coco=False
15 | ```
16 | * python interface:
17 | ```Python
18 | import kitti_common as kitti
19 | from eval import get_official_eval_result, get_coco_eval_result
20 | def _read_imageset_file(path):
21 | with open(path, 'r') as f:
22 | lines = f.readlines()
23 | return [int(line) for line in lines]
24 | det_path = "/path/to/your_result_folder"
25 | dt_annos = kitti.get_label_annos(det_path)
26 | gt_path = "/path/to/your_gt_label_folder"
27 | gt_split_file = "/path/to/val.txt" # from https://xiaozhichen.github.io/files/mv3d/imagesets.tar.gz
28 | val_image_ids = _read_imageset_file(gt_split_file)
29 | gt_annos = kitti.get_label_annos(gt_path, val_image_ids)
30 | print(get_official_eval_result(gt_annos, dt_annos, 0)) # 6s in my computer
31 | print(get_coco_eval_result(gt_annos, dt_annos, 0)) # 18s in my computer
32 | ```
33 |
--------------------------------------------------------------------------------
/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import fire
4 |
5 | from . import kitti_common as kitti
6 | from .eval import get_coco_eval_result, get_official_eval_result
7 |
8 |
9 | def _read_imageset_file(path):
10 | with open(path, 'r') as f:
11 | lines = f.readlines()
12 | return [int(line) for line in lines]
13 |
14 |
15 | def evaluate(label_path,
16 | result_path,
17 | label_split_file,
18 | current_class=0,
19 | coco=False,
20 | score_thresh=-1):
21 | dt_annos = kitti.get_label_annos(result_path)
22 | if score_thresh > 0:
23 | dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
24 | val_image_ids = _read_imageset_file(label_split_file)
25 | gt_annos = kitti.get_label_annos(label_path, val_image_ids)
26 | if coco:
27 | return get_coco_eval_result(gt_annos, dt_annos, current_class)
28 | else:
29 | return get_official_eval_result(gt_annos, dt_annos, current_class)
30 |
31 |
32 | if __name__ == '__main__':
33 | fire.Fire()
34 |
--------------------------------------------------------------------------------
/pcdet/datasets/kitti/kitti_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from ...utils import box_utils
3 |
4 |
5 | def transform_annotations_to_kitti_format(annos, map_name_to_kitti=None, info_with_fakelidar=False):
6 | """
7 | Args:
8 | annos:
9 | map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist)
10 | info_with_fakelidar:
11 | Returns:
12 |
13 | """
14 | for anno in annos:
15 | for k in range(anno['name'].shape[0]):
16 | anno['name'][k] = map_name_to_kitti[anno['name'][k]]
17 |
18 | anno['bbox'] = np.zeros((len(anno['name']), 4))
19 | anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
20 | anno['truncated'] = np.zeros(len(anno['name']))
21 | anno['occluded'] = np.zeros(len(anno['name']))
22 | if 'boxes_lidar' in anno:
23 | gt_boxes_lidar = anno['boxes_lidar'].copy()
24 | else:
25 | gt_boxes_lidar = anno['gt_boxes_lidar'].copy()
26 |
27 | if len(gt_boxes_lidar) > 0:
28 | if info_with_fakelidar:
29 | gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
30 |
31 | gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
32 | anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
33 | anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
34 | anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
35 | anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
36 | dxdydz = gt_boxes_lidar[:, 3:6]
37 | anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
38 | anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
39 | anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
40 | else:
41 | anno['location'] = anno['dimensions'] = np.zeros((0, 3))
42 | anno['rotation_y'] = anno['alpha'] = np.zeros(0)
43 |
44 | return annos
45 |
46 |
47 | def calib_to_matricies(calib):
48 | """
49 | Converts a calibration object to transformation matrices
50 | Args:
51 | calib: calibration.Calibration, Calibration object
52 | Returns
53 | V2R: (4, 4), Lidar to rectified camera transformation matrix
54 | P2: (3, 4), Camera projection matrix
55 | """
56 | V2C = np.vstack((calib.V2C, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4)
57 | R0 = np.hstack((calib.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
58 | R0 = np.vstack((R0, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4)
59 | V2R = R0 @ V2C
60 | P2 = calib.P2
61 | return V2R, P2
--------------------------------------------------------------------------------
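A tiny sketch making the LiDAR-to-camera axis convention used in `transform_annotations_to_kitti_format` above concrete (example numbers only):

```python
import numpy as np

# KITTI camera coords from LiDAR coords: x = -y_lidar (right), y = -z_lidar (down), z = x_lidar (forward).
center_lidar = np.array([10.0, 2.0, -0.8])   # LiDAR frame: x front, y left, z up
center_cam = np.array([-center_lidar[1], -center_lidar[2], center_lidar[0]])
print(center_cam)  # [-2.   0.8 10. ]
```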
/pcdet/datasets/processor/point_feature_encoder.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class PointFeatureEncoder(object):
5 | """
6 | This class decides which point attributes to use, e.g. x, y, z, intensity
7 | """
8 | def __init__(self, config, point_cloud_range=None):
9 | super().__init__()
10 | self.point_encoding_config = config
11 | assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
12 | self.used_feature_list = self.point_encoding_config.used_feature_list # ['x', 'y', 'z', 'intensity']
13 | self.src_feature_list = self.point_encoding_config.src_feature_list # ['x', 'y', 'z', 'intensity']
14 | self.point_cloud_range = point_cloud_range
15 |
16 | @property
17 | def num_point_features(self):
18 | # encoding_type: absolute_coordinates_encoding
19 | return getattr(self, self.point_encoding_config.encoding_type)(points=None) # 4
20 |
21 | def forward(self, data_dict):
22 | """
23 | Args:
24 | data_dict:
25 | points: (N, 3 + C_in)
26 | ...
27 | Returns:
28 | data_dict:
29 | points: (N, 3 + C_out),
30 | use_lead_xyz: whether to use xyz as point-wise features
31 | ...
32 | """
33 | # (N, 4) , True
34 | data_dict['points'], use_lead_xyz = getattr(self, self.point_encoding_config.encoding_type)(
35 | data_dict['points']
36 | )
37 | data_dict['use_lead_xyz'] = use_lead_xyz # True
38 | return data_dict
39 |
40 | def absolute_coordinates_encoding(self, points=None):
41 | if points is None:
42 | num_output_features = len(self.used_feature_list)
43 | return num_output_features
44 |
45 | point_feature_list = [points[:, 0:3]] # [(N, 3)]
46 | for x in self.used_feature_list:
47 | if x in ['x', 'y', 'z']:
48 | continue
49 | idx = self.src_feature_list.index(x) # 3
50 | point_feature_list.append(points[:, idx:idx+1]) # [(N, 3), (N, 1)]
51 | point_features = np.concatenate(point_feature_list, axis=1) # (N, 4)
52 | return point_features, True
53 |
--------------------------------------------------------------------------------
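A minimal usage sketch for `PointFeatureEncoder`; the config values are hypothetical examples mirroring the KITTI-style feature lists noted in the comments above:

```python
import numpy as np
from easydict import EasyDict
from pcdet.datasets.processor.point_feature_encoder import PointFeatureEncoder

# Example config (not from a real yaml): use x, y, z, intensity as-is.
cfg = EasyDict({
    'encoding_type': 'absolute_coordinates_encoding',
    'used_feature_list': ['x', 'y', 'z', 'intensity'],
    'src_feature_list': ['x', 'y', 'z', 'intensity'],
})
encoder = PointFeatureEncoder(cfg)

data = {'points': np.random.rand(100, 4).astype(np.float32)}
data = encoder.forward(data)
print(data['points'].shape, data['use_lead_xyz'])  # (100, 4) True
```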
/pcdet/models/__init__.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from .detectors import build_detector
7 |
8 | try:
9 | import kornia
10 | except:
11 | pass
12 | # print('Warning: kornia is not installed. This package is only required by CaDDN')
13 |
14 |
15 |
16 | def build_network(model_cfg, num_class, dataset):
17 | """
18 | Build the network model by calling build_detector in detectors/__init__.py
19 | """
20 | model = build_detector(
21 | model_cfg=model_cfg, num_class=num_class, dataset=dataset
22 | )
23 | return model
24 |
25 |
26 | def load_data_to_gpu(batch_dict):
27 | """
28 | Skip meta info and calibration data; convert the remaining arrays to the right dtype and move them to the GPU
29 | """
30 | for key, val in batch_dict.items():
31 | if not isinstance(val, np.ndarray):
32 | continue
33 | elif key in ['frame_id', 'metadata', 'calib']:
34 | continue
35 | elif key in ['images']:
36 | batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
37 | elif key in ['image_shape']:
38 | batch_dict[key] = torch.from_numpy(val).int().cuda()
39 | else:
40 | batch_dict[key] = torch.from_numpy(val).float().cuda()
41 |
42 |
43 | def model_fn_decorator():
44 | """
45 | Model function decorator
46 | """
47 | # Define a namedtuple type: https://blog.csdn.net/kongxx/article/details/51553362
48 | # holding the mean loss, the tensorboard result dict, and the display result dict
49 | ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])
50 |
51 | def model_func(model, batch_dict):
52 | # 1. Move the data to the GPU
53 | load_data_to_gpu(batch_dict)
54 | # 2. Feed the data to the model and get the result dicts
55 | ret_dict, tb_dict, disp_dict = model(batch_dict)
56 | # 3. Compute the mean loss
57 | loss = ret_dict['loss'].mean()
58 | # 4. Update the current training iteration count; global_step is registered in Detector3DTemplate,
59 | # is used for moving averages, the optimizer, exponential learning-rate decay, etc.,
60 | # and is initialized to 0
61 | if hasattr(model, 'update_global_step'):
62 | model.update_global_step()
63 | else:
64 | model.module.update_global_step()
65 | # 5. Create and return a ModelReturn object
66 | return ModelReturn(loss, tb_dict, disp_dict)
67 |
68 | return model_func
69 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_2d/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_bev_backbone import BaseBEVBackbone
2 |
3 | __all__ = {
4 | 'BaseBEVBackbone': BaseBEVBackbone
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_2d/base_bev_backbone.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class BaseBEVBackbone(nn.Module):
7 | def __init__(self, model_cfg, input_channels):
8 | super().__init__()
9 | self.model_cfg = model_cfg
10 | # read the downsampling block parameters
11 | if self.model_cfg.get('LAYER_NUMS', None) is not None:
12 | assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS)
13 | layer_nums = self.model_cfg.LAYER_NUMS # (3, 5, 5)
14 | layer_strides = self.model_cfg.LAYER_STRIDES # (2, 2, 2)
15 | num_filters = self.model_cfg.NUM_FILTERS # (64, 128, 256)
16 | else:
17 | layer_nums = layer_strides = num_filters = []
18 | # read the upsampling block parameters
19 | if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None:
20 | assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS)
21 | num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS # (128, 128, 128)
22 | upsample_strides = self.model_cfg.UPSAMPLE_STRIDES # (1, 2, 4)
23 | else:
24 | upsample_strides = num_upsample_filters = []
25 |
26 | num_levels = len(layer_nums) # 3
27 | c_in_list = [input_channels, *num_filters[:-1]] # (64, 64, 128) input_channels:64, num_filters[:-1]:64,128
28 | self.blocks = nn.ModuleList()
29 | self.deblocks = nn.ModuleList()
30 | for idx in range(num_levels):
31 | # (64,64)-->(64,128)-->(128,256)
32 | # the first layer of cur_layers, built with stride=2 for downsampling
33 | cur_layers = [
34 | nn.ZeroPad2d(1),
35 | nn.Conv2d(
36 | c_in_list[idx], num_filters[idx], kernel_size=3,
37 | stride=layer_strides[idx], padding=0, bias=False
38 | ),
39 | nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
40 | nn.ReLU()
41 | ]
42 | # stack conv layers according to layer_nums
43 | for k in range(layer_nums[idx]):
44 | cur_layers.extend([
45 | nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
46 | nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
47 | nn.ReLU()
48 | ])
49 | # append this stage to blocks
50 | # the * operator unpacks the list into separate positional arguments
51 | # (similarly, ** unpacks a dict into keyword arguments)
52 | self.blocks.append(nn.Sequential(*cur_layers))
53 |
54 | # build the upsampling (transposed conv) blocks
55 | if len(upsample_strides) > 0: # (1, 2, 4)
56 | stride = upsample_strides[idx]
57 | if stride >= 1:
58 | self.deblocks.append(nn.Sequential(
59 | nn.ConvTranspose2d(
60 | num_filters[idx], num_upsample_filters[idx], # (128,128,128)
61 | upsample_strides[idx],
62 | stride=upsample_strides[idx], bias=False
63 | ),
64 | nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
65 | nn.ReLU()
66 | ))
67 | else:
68 | stride = int(np.round(1 / stride))
69 | self.deblocks.append(nn.Sequential(
70 | nn.Conv2d(
71 | num_filters[idx], num_upsample_filters[idx],
72 | stride,
73 | stride=stride, bias=False
74 | ),
75 | nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
76 | nn.ReLU()
77 | ))
78 |
79 | c_in = sum(num_upsample_filters) # 384
80 | if len(upsample_strides) > num_levels:
81 | self.deblocks.append(nn.Sequential(
82 | nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
83 | nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
84 | nn.ReLU(),
85 | ))
86 |
87 | self.num_bev_features = c_in
88 |
89 | def forward(self, data_dict):
90 | """
91 | Args:
92 | data_dict:
93 | spatial_features:(4,64,496,432)
94 | Returns:
95 | """
96 | spatial_features = data_dict['spatial_features']
97 | ups = []
98 | ret_dict = {}
99 | x = spatial_features
100 | for i in range(len(self.blocks)):
101 | x = self.blocks[i](x)
102 |
103 | stride = int(spatial_features.shape[2] / x.shape[2])
104 | ret_dict['spatial_features_%dx' % stride] = x
105 | # (4,64,248,216)-->(4,128,124,108)-->(4,256,62,54)
106 | if len(self.deblocks) > 0:
107 | ups.append(self.deblocks[i](x))
108 | else:
109 | ups.append(x)
110 |
111 | # concatenate the upsampled feature maps if upsampling layers exist
112 | if len(ups) > 1:
113 | x = torch.cat(ups, dim=1) # (4,384,248,216)
114 | elif len(ups) == 1:
115 | x = ups[0]
116 |
117 | if len(self.deblocks) > len(self.blocks):
118 | x = self.deblocks[-1](x)
119 |
120 | # store the result in spatial_features_2d and return
121 | data_dict['spatial_features_2d'] = x
122 |
123 | return data_dict
124 |
--------------------------------------------------------------------------------
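A shape walkthrough for `BaseBEVBackbone`, using a hypothetical PointPillars-style config that matches the shapes in the comments above (a sketch, assuming `pcdet` is installed):

```python
import torch
from easydict import EasyDict
from pcdet.models.backbones_2d import BaseBEVBackbone

# Example config values taken from the shape comments above.
cfg = EasyDict({
    'LAYER_NUMS': [3, 5, 5],
    'LAYER_STRIDES': [2, 2, 2],
    'NUM_FILTERS': [64, 128, 256],
    'UPSAMPLE_STRIDES': [1, 2, 4],
    'NUM_UPSAMPLE_FILTERS': [128, 128, 128],
})
backbone = BaseBEVBackbone(cfg, input_channels=64)

# Three stages downsample to 248/124/62, each deblock upsamples back to 248x216,
# and the concatenation yields 3 * 128 = 384 channels.
data = {'spatial_features': torch.zeros(4, 64, 496, 432)}
out = backbone(data)['spatial_features_2d']
print(out.shape)  # torch.Size([4, 384, 248, 216])
```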
/pcdet/models/backbones_2d/map_to_bev/__init__.py:
--------------------------------------------------------------------------------
1 | from .height_compression import HeightCompression
2 | from .pointpillar_scatter import PointPillarScatter
3 | from .conv2d_collapse import Conv2DCollapse
4 |
5 | __all__ = {
6 | 'HeightCompression': HeightCompression,
7 | 'PointPillarScatter': PointPillarScatter,
8 | 'Conv2DCollapse': Conv2DCollapse
9 | }
10 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
5 |
6 |
7 | class Conv2DCollapse(nn.Module):
8 |
9 | def __init__(self, model_cfg, grid_size):
10 | """
11 | Initializes 2D convolution collapse module
12 | Args:
13 | model_cfg: EasyDict, Model configuration
14 | grid_size: (X, Y, Z) Voxel grid size
15 | """
16 | super().__init__()
17 | self.model_cfg = model_cfg
18 | self.num_heights = grid_size[-1]
19 | self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
20 | self.block = BasicBlock2D(in_channels=self.num_bev_features * self.num_heights,
21 | out_channels=self.num_bev_features,
22 | **self.model_cfg.ARGS)
23 |
24 | def forward(self, batch_dict):
25 | """
26 | Collapses voxel features to BEV via concatenation and channel reduction
27 | Args:
28 | batch_dict:
29 | voxel_features: (B, C, Z, Y, X), Voxel feature representation
30 | Returns:
31 | batch_dict:
32 | spatial_features: (B, C, Y, X), BEV feature representation
33 | """
34 | voxel_features = batch_dict["voxel_features"]
35 | bev_features = voxel_features.flatten(start_dim=1, end_dim=2) # (B, C, Z, Y, X) -> (B, C*Z, Y, X)
36 | bev_features = self.block(bev_features) # (B, C*Z, Y, X) -> (B, C, Y, X)
37 | batch_dict["spatial_features"] = bev_features
38 | return batch_dict
39 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_2d/map_to_bev/height_compression.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class HeightCompression(nn.Module):
5 | def __init__(self, model_cfg, **kwargs):
6 | """
7 | Compress the features along the height dimension
8 | """
9 | super().__init__()
10 | self.model_cfg = model_cfg
11 | self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES # 256
12 |
13 | def forward(self, batch_dict):
14 | """
15 | Args:
16 | batch_dict:
17 | encoded_spconv_tensor: sparse tensor
18 | Returns:
19 | batch_dict:
20 | spatial_features:
21 |
22 | """
23 | encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
24 | # densify: combine batch, spatial_shape, indices and features to restore the features to their positions
25 | spatial_features = encoded_spconv_tensor.dense()
26 | N, C, D, H, W = spatial_features.shape # 4,128,2,200,150
27 | spatial_features = spatial_features.view(N, C * D, H, W) # (4,256,200,150) merge along height, compressing the volume to a BEV feature map
28 | # add the features and the downsampling stride to batch_dict
29 | batch_dict['spatial_features'] = spatial_features
30 | batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride'] # 8
31 | return batch_dict
32 |
--------------------------------------------------------------------------------
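The core of `HeightCompression` is a single `view`; a tiny sketch with the shapes from the comments above:

```python
import torch

# Stack the D height slices of a dense (N, C, D, H, W) volume into the channel dim.
dense = torch.zeros(4, 128, 2, 200, 150)   # e.g. encoded_spconv_tensor.dense()
N, C, D, H, W = dense.shape
bev = dense.view(N, C * D, H, W)
print(bev.shape)  # torch.Size([4, 256, 200, 150])
```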
/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class PointPillarScatter(nn.Module):
6 | """
7 | This corresponds to the "stacked pillars" of the paper: the generated pillars are scattered back to their original positions by coordinate index
8 | """
9 | def __init__(self, model_cfg, grid_size, **kwargs):
10 | super().__init__()
11 |
12 | self.model_cfg = model_cfg
13 | self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES # 64
14 | self.nx, self.ny, self.nz = grid_size # [432,496,1]
15 | assert self.nz == 1
16 |
17 | def forward(self, batch_dict, **kwargs):
18 | """
19 | Args:
20 | pillar_features:(31530,64)
21 | coords:(31530, 4), the first column is the batch index
22 | Returns:
23 | batch_spatial_features:(4, 64, 496, 432)
24 | """
25 | pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords']
26 | batch_spatial_features = []
27 | batch_size = coords[:, 0].max().int().item() + 1 # 4
28 | for batch_idx in range(batch_size):
29 | spatial_feature = torch.zeros(
30 | self.num_bev_features,
31 | self.nz * self.nx * self.ny,
32 | dtype=pillar_features.dtype,
33 | device=pillar_features.device) # (64,214272)-->1x432x496=214272
34 | # mask for this batch index
35 | batch_mask = coords[:, 0] == batch_idx
36 | # select the coordinates with the mask
37 | this_coords = coords[batch_mask, :] # (8629,4)
38 | # coords are in (z, y, x) form with a single z layer (z is always 0), so the flat index is computed as follows
39 | indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
40 | # convert the dtype
41 | indices = indices.type(torch.long)
42 | # select this sample's pillar features with the mask
43 | pillars = pillar_features[batch_mask, :] # (8629,64)
44 | pillars = pillars.t() # (64,8629)
45 | # fill the pillars in at their flat indices
46 | spatial_feature[:, indices] = pillars
47 | # append the spatial features to the list; each element is (64,214272)
48 | batch_spatial_features.append(spatial_feature)
49 |
50 | batch_spatial_features = torch.stack(batch_spatial_features, 0) # (4, 64, 214272)
51 | # reshape back to the original space (a pseudo-image) --> (4, 64, 496, 432)
52 | batch_spatial_features = batch_spatial_features.view(batch_size, self.num_bev_features * self.nz, self.ny, self.nx)
53 | # add the result to batch_dict
54 | batch_dict['spatial_features'] = batch_spatial_features
55 | return batch_dict
56 |
--------------------------------------------------------------------------------
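A toy sketch of the scatter step above, with a hypothetical 3×4 grid and 2-channel pillar features:

```python
import torch

nx, ny, C = 4, 3, 2
coords = torch.tensor([[0, 0, 1, 2],     # (batch, z, y, x); z is always 0
                       [0, 0, 0, 0],
                       [0, 0, 2, 3]])
pillars = torch.arange(6, dtype=torch.float32).view(3, C)

# Place the pillar vectors into a flat (C, ny*nx) canvas at index z + y*nx + x,
# then reshape to the BEV pseudo-image.
canvas = torch.zeros(C, ny * nx)
flat_idx = coords[:, 1] + coords[:, 2] * nx + coords[:, 3]
canvas[:, flat_idx] = pillars.t()
print(canvas.view(C, ny, nx))
```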
/pcdet/models/backbones_3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .pointnet2_backbone import PointNet2Backbone, PointNet2MSG
2 | from .spconv_backbone import VoxelBackBone8x, VoxelResBackBone8x
3 | from .spconv_unet import UNetV2
4 |
5 | __all__ = {
6 | 'VoxelBackBone8x': VoxelBackBone8x,
7 | 'UNetV2': UNetV2,
8 | 'PointNet2Backbone': PointNet2Backbone,
9 | 'PointNet2MSG': PointNet2MSG,
10 | 'VoxelResBackBone8x': VoxelResBackBone8x,
11 | }
12 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/pfe/__init__.py:
--------------------------------------------------------------------------------
1 | from .voxel_set_abstraction import VoxelSetAbstraction
2 |
3 | __all__ = {
4 | 'VoxelSetAbstraction': VoxelSetAbstraction
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/__init__.py:
--------------------------------------------------------------------------------
1 | from .mean_vfe import MeanVFE
2 | from .pillar_vfe import PillarVFE
3 | from .image_vfe import ImageVFE
4 | from .vfe_template import VFETemplate
5 |
6 | __all__ = {
7 | 'VFETemplate': VFETemplate,
8 | 'MeanVFE': MeanVFE,
9 | 'PillarVFE': PillarVFE,
10 | 'ImageVFE': ImageVFE
11 | }
12 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .vfe_template import VFETemplate
4 | from .image_vfe_modules import ffn, f2v
5 |
6 |
7 | class ImageVFE(VFETemplate):
8 | def __init__(self, model_cfg, grid_size, point_cloud_range, depth_downsample_factor, **kwargs):
9 | super().__init__(model_cfg=model_cfg)
10 | self.grid_size = grid_size
11 | self.pc_range = point_cloud_range
12 | self.downsample_factor = depth_downsample_factor
13 | self.module_topology = [
14 | 'ffn', 'f2v'
15 | ]
16 | self.build_modules()
17 |
18 | def build_modules(self):
19 | """
20 | Builds modules
21 | """
22 | for module_name in self.module_topology:
23 | module = getattr(self, 'build_%s' % module_name)()
24 | self.add_module(module_name, module)
25 |
26 | def build_ffn(self):
27 | """
28 | Builds frustum feature network
29 | Returns:
30 | ffn_module: nn.Module, Frustum feature network
31 | """
32 | ffn_module = ffn.__all__[self.model_cfg.FFN.NAME](
33 | model_cfg=self.model_cfg.FFN,
34 | downsample_factor=self.downsample_factor
35 | )
36 | self.disc_cfg = ffn_module.disc_cfg
37 | return ffn_module
38 |
39 | def build_f2v(self):
40 | """
41 | Builds frustum to voxel transformation
42 | Returns:
43 | f2v_module: nn.Module, Frustum to voxel transformation
44 | """
45 | f2v_module = f2v.__all__[self.model_cfg.F2V.NAME](
46 | model_cfg=self.model_cfg.F2V,
47 | grid_size=self.grid_size,
48 | pc_range=self.pc_range,
49 | disc_cfg=self.disc_cfg
50 | )
51 | return f2v_module
52 |
53 | def get_output_feature_dim(self):
54 | """
55 | Gets number of output channels
56 | Returns:
57 | out_feature_dim: int, Number of output channels
58 | """
59 | out_feature_dim = self.ffn.get_output_feature_dim()
60 | return out_feature_dim
61 |
62 | def forward(self, batch_dict, **kwargs):
63 | """
64 | Args:
65 | batch_dict:
66 | images: (N, 3, H_in, W_in), Input images
67 | **kwargs:
68 | Returns:
69 | batch_dict:
70 | voxel_features: (B, C, Z, Y, X), Image voxel features
71 | """
72 | batch_dict = self.ffn(batch_dict)
73 | batch_dict = self.f2v(batch_dict)
74 | return batch_dict
75 |
76 | def get_loss(self):
77 | """
78 | Gets DDN loss
79 | Returns:
80 | loss: (1), Depth distribution network loss
81 | tb_dict: dict[float], All losses to log in tensorboard
82 | """
83 |
84 | loss, tb_dict = self.ffn.get_loss()
85 | return loss, tb_dict
86 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py:
--------------------------------------------------------------------------------
1 | from .frustum_to_voxel import FrustumToVoxel
2 |
3 | __all__ = {
4 | 'FrustumToVoxel': FrustumToVoxel
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from .frustum_grid_generator import FrustumGridGenerator
5 | from .sampler import Sampler
6 |
7 |
8 | class FrustumToVoxel(nn.Module):
9 |
10 | def __init__(self, model_cfg, grid_size, pc_range, disc_cfg):
11 | """
12 | Initializes module to transform frustum features to voxel features via 3D transformation and sampling
13 | Args:
14 | model_cfg: EasyDict, Module configuration
15 | grid_size: [X, Y, Z], Voxel grid size
16 | pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m)
17 | disc_cfg: EasyDict, Depth discretization configuration
18 | """
19 | super().__init__()
20 | self.model_cfg = model_cfg
21 | self.grid_size = grid_size
22 | self.pc_range = pc_range
23 | self.disc_cfg = disc_cfg
24 | self.grid_generator = FrustumGridGenerator(grid_size=grid_size,
25 | pc_range=pc_range,
26 | disc_cfg=disc_cfg)
27 | self.sampler = Sampler(**model_cfg.SAMPLER)
28 |
29 | def forward(self, batch_dict):
30 | """
31 | Generates voxel features via 3D transformation and sampling
32 | Args:
33 | batch_dict:
34 | frustum_features: (B, C, D, H_image, W_image), Image frustum features
35 | lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation
36 | cam_to_img: (B, 3, 4), Camera projection matrix
37 | image_shape: (B, 2), Image shape [H, W]
38 | Returns:
39 | batch_dict:
40 | voxel_features: (B, C, Z, Y, X), Image voxel features
41 | """
42 | # Generate sampling grid for frustum volume
43 | grid = self.grid_generator(lidar_to_cam=batch_dict["trans_lidar_to_cam"],
44 | cam_to_img=batch_dict["trans_cam_to_img"],
45 | image_shape=batch_dict["image_shape"]) # (B, X, Y, Z, 3)
46 |
47 | # Sample frustum volume to generate voxel volume
48 | voxel_features = self.sampler(input_features=batch_dict["frustum_features"],
49 | grid=grid) # (B, C, X, Y, Z)
50 |
51 | # (B, C, X, Y, Z) -> (B, C, Z, Y, X)
52 | voxel_features = voxel_features.permute(0, 1, 4, 3, 2)
53 | batch_dict["voxel_features"] = voxel_features
54 | return batch_dict
55 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class Sampler(nn.Module):
7 |
8 | def __init__(self, mode="bilinear", padding_mode="zeros"):
9 | """
10 | Initializes module
11 | Args:
12 | mode: string, Sampling mode [bilinear/nearest]
13 | padding_mode: string, Padding mode for outside grid values [zeros/border/reflection]
14 | """
15 | super().__init__()
16 | self.mode = mode
17 | self.padding_mode = padding_mode
18 |
19 | def forward(self, input_features, grid):
20 | """
21 | Samples input using sampling grid
22 | Args:
23 | input_features: (B, C, D, H, W), Input frustum features
24 | grid: (B, X, Y, Z, 3), Sampling grids for input features
25 | Returns
26 | output_features: (B, C, X, Y, Z) Output voxel features
27 | """
28 | # Sample from grid
29 | output = F.grid_sample(input=input_features, grid=grid, mode=self.mode, padding_mode=self.padding_mode)
30 | return output
31 |
--------------------------------------------------------------------------------
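A quick shape check for the 5D `grid_sample` call above (the sizes are random examples):

```python
import torch
import torch.nn.functional as F

# A (B, X, Y, Z, 3) grid sampled from (B, C, D, H, W) frustum features
# yields (B, C, X, Y, Z) voxel features.
B, C, D, H, W = 1, 8, 10, 24, 78
X, Y, Z = 40, 40, 4
features = torch.randn(B, C, D, H, W)
grid = torch.rand(B, X, Y, Z, 3) * 2 - 1   # normalized coords in [-1, 1]
out = F.grid_sample(features, grid, mode='bilinear', padding_mode='zeros')
print(out.shape)  # torch.Size([1, 8, 40, 40, 4])
```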
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py:
--------------------------------------------------------------------------------
1 | from .depth_ffn import DepthFFN
2 |
3 | __all__ = {
4 | 'DepthFFN': DepthFFN
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py:
--------------------------------------------------------------------------------
1 | from .ddn_deeplabv3 import DDNDeepLabV3
2 |
3 | __all__ = {
4 | 'DDNDeepLabV3': DDNDeepLabV3
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py:
--------------------------------------------------------------------------------
1 | import torchvision
2 |
3 | from .ddn_template import DDNTemplate
4 |
5 |
6 | class DDNDeepLabV3(DDNTemplate):
7 |
8 | def __init__(self, backbone_name, **kwargs):
9 | """
10 | Initializes DDNDeepLabV3 model
11 | Args:
12 | backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101]
13 | """
14 | if backbone_name == "ResNet50":
15 | constructor = torchvision.models.segmentation.deeplabv3_resnet50
16 | elif backbone_name == "ResNet101":
17 | constructor = torchvision.models.segmentation.deeplabv3_resnet101
18 | else:
19 | raise NotImplementedError
20 |
21 | super().__init__(constructor=constructor, **kwargs)
22 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py:
--------------------------------------------------------------------------------
1 | from .ddn_loss import DDNLoss
2 |
3 | __all__ = {
4 | "DDNLoss": DDNLoss
5 | }
6 |
--------------------------------------------------------------------------------
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from pcdet.utils import loss_utils
5 |
6 |
7 | class Balancer(nn.Module):
8 | def __init__(self, fg_weight, bg_weight, downsample_factor=1):
9 | """
10 | Initialize fixed foreground/background loss balancer
11 | Args:
12 | fg_weight: float, Foreground loss weight
13 | bg_weight: float, Background loss weight
14 | downsample_factor: int, Depth map downsample factor
15 | """
16 | super().__init__()
17 | self.fg_weight = fg_weight
18 | self.bg_weight = bg_weight
19 | self.downsample_factor = downsample_factor
20 |
21 | def forward(self, loss, gt_boxes2d):
22 | """
23 | Forward pass
24 | Args:
25 | loss: (B, H, W), Pixel-wise loss
26 | gt_boxes2d: (B, N, 4), 2D box labels for foreground/background balancing
27 | Returns:
28 | loss: (1), Total loss after foreground/background balancing
29 | tb_dict: dict[float], All losses to log in tensorboard
30 | """
31 | # Compute masks
32 | fg_mask = loss_utils.compute_fg_mask(gt_boxes2d=gt_boxes2d,
33 | shape=loss.shape,
34 | downsample_factor=self.downsample_factor,
35 | device=loss.device)
36 | bg_mask = ~fg_mask
37 |
38 | # Compute balancing weights
39 | weights = self.fg_weight * fg_mask + self.bg_weight * bg_mask
40 | num_pixels = fg_mask.sum() + bg_mask.sum()
41 |
42 | # Compute losses
43 | loss *= weights
44 | fg_loss = loss[fg_mask].sum() / num_pixels
45 | bg_loss = loss[bg_mask].sum() / num_pixels
46 |
47 | # Get total loss
48 | loss = fg_loss + bg_loss
49 | tb_dict = {"balancer_loss": loss.item(), "fg_loss": fg_loss.item(), "bg_loss": bg_loss.item()}
50 | return loss, tb_dict
51 |
--------------------------------------------------------------------------------
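A toy sketch of the balancing arithmetic above; the weights (13 for foreground, 1 for background) are example values, not taken from any config:

```python
import torch

# 4x4 pixel-wise loss, all ones; pretend the center 2x2 patch is foreground.
loss = torch.ones(1, 4, 4)
fg_mask = torch.zeros(1, 4, 4, dtype=torch.bool)
fg_mask[:, 1:3, 1:3] = True
bg_mask = ~fg_mask

weights = 13.0 * fg_mask + 1.0 * bg_mask        # example fg/bg weights
num_pixels = fg_mask.sum() + bg_mask.sum()      # 16
loss = loss * weights
fg_loss = loss[fg_mask].sum() / num_pixels      # 4 * 13 / 16
bg_loss = loss[bg_mask].sum() / num_pixels      # 12 * 1 / 16
print((fg_loss + bg_loss).item())               # 4.0
```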
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | from .balancer import Balancer
6 | from pcdet.utils import transform_utils
7 |
8 | try:
9 | from kornia.losses.focal import FocalLoss
10 | except ImportError:
11 |     # kornia is not installed; it is only required by CaDDN
12 |     pass
13 |
14 |
15 | class DDNLoss(nn.Module):
16 |
17 | def __init__(self,
18 | weight,
19 | alpha,
20 | gamma,
21 | disc_cfg,
22 | fg_weight,
23 | bg_weight,
24 | downsample_factor):
25 | """
26 | Initializes DDNLoss module
27 | Args:
28 | weight: float, Loss function weight
29 | alpha: float, Alpha value for Focal Loss
30 | gamma: float, Gamma value for Focal Loss
31 | disc_cfg: dict, Depth discretization configuration
32 | fg_weight: float, Foreground loss weight
33 | bg_weight: float, Background loss weight
34 | downsample_factor: int, Depth map downsample factor
35 | """
36 | super().__init__()
37 | self.device = torch.cuda.current_device()
38 | self.disc_cfg = disc_cfg
39 | self.balancer = Balancer(downsample_factor=downsample_factor,
40 | fg_weight=fg_weight,
41 | bg_weight=bg_weight)
42 |
43 | # Set loss function
44 | self.alpha = alpha
45 | self.gamma = gamma
46 | self.loss_func = FocalLoss(alpha=self.alpha, gamma=self.gamma, reduction="none")
47 | self.weight = weight
48 |
49 | def forward(self, depth_logits, depth_maps, gt_boxes2d):
50 | """
51 | Gets DDN loss
52 | Args:
53 | depth_logits: (B, D+1, H, W), Predicted depth logits
54 | depth_maps: (B, H, W), Depth map [m]
55 | gt_boxes2d: torch.Tensor (B, N, 4), 2D box labels for foreground/background balancing
56 | Returns:
57 | loss: (1), Depth distribution network loss
58 | tb_dict: dict[float], All losses to log in tensorboard
59 | """
60 | tb_dict = {}
61 |
62 | # Bin depth map to create target
63 | depth_target = transform_utils.bin_depths(depth_maps, **self.disc_cfg, target=True)
64 |
65 | # Compute loss
66 | loss = self.loss_func(depth_logits, depth_target)
67 |
68 | # Compute foreground/background balancing
69 | loss, tb_dict = self.balancer(loss=loss, gt_boxes2d=gt_boxes2d)
70 |
71 | # Final loss
72 | loss *= self.weight
73 | tb_dict.update({"ddn_loss": loss.item()})
74 |
75 | return loss, tb_dict
76 |
--------------------------------------------------------------------------------
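Note: transform_utils.bin_depths (not shown in this listing) converts the continuous depth map into bin indices before the focal loss. A sketch of the LID (linear-increasing discretization) mode used by CaDDN, assuming disc_cfg supplies depth_min, depth_max and num_bins; the signature is an assumption:

import torch

def bin_depths_lid(depth_map, depth_min, depth_max, num_bins, target=False):
    # LID: bin widths grow linearly with depth, so near depths get finer bins
    bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
    indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
    if target:
        # depths outside [depth_min, depth_max] fall into the extra (D+1)-th bin
        mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
        indices[mask] = num_bins
        indices = indices.type(torch.int64)
    return indices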
/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 |
4 | from . import ddn, ddn_loss
5 | from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
6 |
7 |
8 | class DepthFFN(nn.Module):
9 |
10 | def __init__(self, model_cfg, downsample_factor):
11 | """
12 | Initialize frustum feature network via depth distribution estimation
13 | Args:
14 | model_cfg: EasyDict, Depth classification network config
15 | downsample_factor: int, Depth map downsample factor
16 | """
17 | super().__init__()
18 | self.model_cfg = model_cfg
19 | self.disc_cfg = model_cfg.DISCRETIZE
20 | self.downsample_factor = downsample_factor
21 |
22 | # Create modules
23 | self.ddn = ddn.__all__[model_cfg.DDN.NAME](
24 | num_classes=self.disc_cfg["num_bins"] + 1,
25 | backbone_name=model_cfg.DDN.BACKBONE_NAME,
26 | **model_cfg.DDN.ARGS
27 | )
28 | self.channel_reduce = BasicBlock2D(**model_cfg.CHANNEL_REDUCE)
29 | self.ddn_loss = ddn_loss.__all__[model_cfg.LOSS.NAME](
30 | disc_cfg=self.disc_cfg,
31 | downsample_factor=downsample_factor,
32 | **model_cfg.LOSS.ARGS
33 | )
34 | self.forward_ret_dict = {}
35 |
36 | def get_output_feature_dim(self):
37 | return self.channel_reduce.out_channels
38 |
39 | def forward(self, batch_dict):
40 | """
41 | Predicts depths and creates image depth feature volume using depth distributions
42 | Args:
43 | batch_dict:
44 | images: (N, 3, H_in, W_in), Input images
45 | Returns:
46 | batch_dict:
47 | frustum_features: (N, C, D, H_out, W_out), Image depth features
48 | """
49 | # Pixel-wise depth classification
50 | images = batch_dict["images"]
51 | ddn_result = self.ddn(images)
52 | image_features = ddn_result["features"]
53 | depth_logits = ddn_result["logits"]
54 |
55 | # Channel reduce
56 | if self.channel_reduce is not None:
57 | image_features = self.channel_reduce(image_features)
58 |
59 | # Create image feature plane-sweep volume
60 | frustum_features = self.create_frustum_features(image_features=image_features,
61 | depth_logits=depth_logits)
62 | batch_dict["frustum_features"] = frustum_features
63 |
64 | if self.training:
65 | self.forward_ret_dict["depth_maps"] = batch_dict["depth_maps"]
66 | self.forward_ret_dict["gt_boxes2d"] = batch_dict["gt_boxes2d"]
67 | self.forward_ret_dict["depth_logits"] = depth_logits
68 | return batch_dict
69 |
70 | def create_frustum_features(self, image_features, depth_logits):
71 | """
72 | Create image depth feature volume by multiplying image features with depth distributions
73 | Args:
74 | image_features: (N, C, H, W), Image features
75 | depth_logits: (N, D+1, H, W), Depth classification logits
76 | Returns:
77 | frustum_features: (N, C, D, H, W), Image features
78 | """
79 | channel_dim = 1
80 | depth_dim = 2
81 |
82 | # Resize to match dimensions
83 | image_features = image_features.unsqueeze(depth_dim)
84 | depth_logits = depth_logits.unsqueeze(channel_dim)
85 |
86 | # Apply softmax along depth axis and remove last depth category (> Max Range)
87 | depth_probs = F.softmax(depth_logits, dim=depth_dim)
88 | depth_probs = depth_probs[:, :, :-1]
89 |
90 | # Multiply to form image depth feature volume
91 | frustum_features = depth_probs * image_features
92 | return frustum_features
93 |
94 | def get_loss(self):
95 | """
96 | Gets DDN loss
97 | Args:
98 | Returns:
99 | loss: (1), Depth distribution network loss
100 | tb_dict: dict[float], All losses to log in tensorboard
101 | """
102 | loss, tb_dict = self.ddn_loss(**self.forward_ret_dict)
103 | return loss, tb_dict
104 |
--------------------------------------------------------------------------------
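Note: create_frustum_features is a broadcasted outer product along the depth axis; a quick shape check of the recipe above:

import torch
import torch.nn.functional as F

N, C, D, H, W = 1, 8, 10, 6, 5                            # toy sizes
image_features = torch.randn(N, C, H, W).unsqueeze(2)     # (N, C, 1, H, W)
depth_logits = torch.randn(N, D + 1, H, W).unsqueeze(1)   # (N, 1, D+1, H, W)
depth_probs = F.softmax(depth_logits, dim=2)[:, :, :-1]   # drop the "beyond max range" bin
frustum = depth_probs * image_features                    # broadcasts to (N, C, D, H, W)
assert frustum.shape == (N, C, D, H, W)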
/pcdet/models/backbones_3d/vfe/mean_vfe.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .vfe_template import VFETemplate
4 |
5 |
6 | class MeanVFE(VFETemplate):
7 | def __init__(self, model_cfg, num_point_features, **kwargs):
8 | super().__init__(model_cfg=model_cfg)
9 | self.num_point_features = num_point_features # 4
10 |
11 | def get_output_feature_dim(self):
12 | return self.num_point_features
13 |
14 | def forward(self, batch_dict, **kwargs):
15 | """
16 | Args:
17 | batch_dict:
18 | voxels: (num_voxels, max_points_per_voxel, C) (64000, 5, 4)
19 | voxel_num_points: optional (num_voxels) (64000)
20 | **kwargs:
21 |
22 | Returns:
23 | vfe_features: (num_voxels, C)
24 | """
25 | voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points'] # (64000, 5, 4), (64000)
26 | points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False) # sum of point features within each voxel # (64000, 4)
27 | normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features) # clamp point counts to >= 1 --> (64000, 1), avoids division by zero
28 | points_mean = points_mean / normalizer # mean of the point features within each voxel # (64000, 4)
29 | batch_dict['voxel_features'] = points_mean.contiguous() # store the voxel features in batch_dict
30 |
31 | return batch_dict
32 |
--------------------------------------------------------------------------------
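Note: because MeanVFE divides the per-voxel sum by the true point count, zero-padded slots do not bias the mean. A quick numeric check:

import torch

voxels = torch.zeros(1, 5, 4)                       # 1 voxel, up to 5 points, 4 features each
voxels[0, :2] = torch.tensor([[1., 2., 3., 0.5],
                              [3., 4., 5., 0.7]])   # only 2 real points; rows 2-4 are padding
num_points = torch.tensor([2.])
mean = voxels.sum(dim=1) / torch.clamp_min(num_points.view(-1, 1), 1.0)
print(mean)  # tensor([[2.0000, 3.0000, 4.0000, 0.6000]])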
/pcdet/models/backbones_3d/vfe/vfe_template.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class VFETemplate(nn.Module):
5 | def __init__(self, model_cfg, **kwargs):
6 | super().__init__()
7 | self.model_cfg = model_cfg # NAME: MeanVFE
8 |
9 | def get_output_feature_dim(self):
10 | raise NotImplementedError
11 |
12 | def forward(self, **kwargs):
13 | """
14 | Args:
15 | **kwargs:
16 |
17 | Returns:
18 | batch_dict:
19 | ...
20 | vfe_features: (num_voxels, C)
21 | """
22 | raise NotImplementedError
23 |
--------------------------------------------------------------------------------
/pcdet/models/dense_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_head_multi import AnchorHeadMulti
2 | from .anchor_head_single import AnchorHeadSingle
3 | from .anchor_head_template import AnchorHeadTemplate
4 | from .point_head_box import PointHeadBox
5 | from .point_head_simple import PointHeadSimple
6 | from .point_intra_part_head import PointIntraPartOffsetHead
7 |
8 | __all__ = {
9 | 'AnchorHeadTemplate': AnchorHeadTemplate,
10 | 'AnchorHeadSingle': AnchorHeadSingle,
11 | 'PointIntraPartOffsetHead': PointIntraPartOffsetHead,
12 | 'PointHeadSimple': PointHeadSimple,
13 | 'PointHeadBox': PointHeadBox,
14 | 'AnchorHeadMulti': AnchorHeadMulti,
15 | }
16 |
--------------------------------------------------------------------------------
/pcdet/models/dense_heads/anchor_head_single.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch.nn as nn
3 |
4 | from .anchor_head_template import AnchorHeadTemplate
5 |
6 |
7 | class AnchorHeadSingle(AnchorHeadTemplate):
8 | def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
9 | predict_boxes_when_training=True, **kwargs):
10 | """
11 | Args:
12 | model_cfg: config of AnchorHeadSingle
13 | input_channels: 384, number of input channels
14 | num_class: 3
15 | class_names: ['Car','Pedestrian','Cyclist']
16 | grid_size: (432,493,1)
17 | point_cloud_range:(0, -39.68, -3, 69.12, 39.68, 1)
18 | predict_boxes_when_training:False
19 | """
20 | super().__init__(
21 | model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
22 | predict_boxes_when_training=predict_boxes_when_training
23 | )
24 |
25 | self.num_anchors_per_location = sum(self.num_anchors_per_location) # 2*3=6
26 | # Conv2d(384,18,kernel_size=(1,1),stride=(1,1))
27 | self.conv_cls = nn.Conv2d(
28 | input_channels, self.num_anchors_per_location * self.num_class, # 6*3=18
29 | kernel_size=1
30 | )
31 | # Conv2d(384,42,kernel_size=(1,1),stride=(1,1))
32 | self.conv_box = nn.Conv2d(
33 | input_channels, self.num_anchors_per_location * self.box_coder.code_size, # 6*7=42
34 | kernel_size=1
35 | )
36 | # If a direction classifier is configured, add the direction conv layer Conv2d(384,12,kernel_size=(1,1),stride=(1,1))
37 | if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
38 | self.conv_dir_cls = nn.Conv2d(
39 | input_channels,
40 | self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS, # 6*2=12
41 | kernel_size=1
42 | )
43 | else:
44 | self.conv_dir_cls = None
45 | self.init_weights()
46 |
47 | def init_weights(self):
48 | # parameter initialization
49 | pi = 0.01
50 | nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
51 | nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
52 |
53 | def forward(self, data_dict):
54 | spatial_features_2d = data_dict['spatial_features_2d'] # (4,384,248,216)
55 |
56 | cls_preds = self.conv_cls(spatial_features_2d) # class prediction for each anchor --> (4,18,248,216)
57 | box_preds = self.conv_box(spatial_features_2d) # box prediction for each anchor --> (4,42,248,216)
58 |
59 | cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
60 | box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C] -->(4,248,216,42)
61 | # store the predictions in the forward results dict
62 | self.forward_ret_dict['cls_preds'] = cls_preds
63 | self.forward_ret_dict['box_preds'] = box_preds
64 | # if the direction conv layer exists, also predict direction
65 | if self.conv_dir_cls is not None:
66 | dir_cls_preds = self.conv_dir_cls(spatial_features_2d) # direction prediction for each anchor --> (4,12,248,216)
67 | dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C] -->(4,248,216,12)
68 | self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
69 | else:
70 | dir_cls_preds = None
71 |
72 | if self.training:
73 | # targets_dict = {
74 | # 'box_cls_labels': cls_labels, # (4,321408)
75 | # 'box_reg_targets': bbox_targets, # (4,321408,7)
76 | # 'reg_weights': reg_weights # (4,321408)
77 | # }
78 | targets_dict = self.assign_targets(
79 | gt_boxes=data_dict['gt_boxes'] # (4,39,8)
80 | )
81 | self.forward_ret_dict.update(targets_dict)
82 |
83 | # At inference, predict boxes directly; for two-stage networks, proposals are also generated during training (batch size is not 1 here)
84 | if not self.training or self.predict_boxes_when_training:
85 | # input: the raw class, box and direction predictions; output: the flattened predictions
86 | batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
87 | batch_size=data_dict['batch_size'],
88 | cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
89 | )
90 | data_dict['batch_cls_preds'] = batch_cls_preds # (1, 321408, 3)
91 | data_dict['batch_box_preds'] = batch_box_preds # (1, 321408, 7)
92 | data_dict['cls_preds_normalized'] = False
93 |
94 | return data_dict
95 |
--------------------------------------------------------------------------------
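Note: the 321408 appearing in the shape comments is simply the anchor count implied by the feature-map size and anchor layout:

H, W = 248, 216                       # spatial_features_2d resolution
anchors_per_location = 2 * 3          # 2 orientations x 3 classes
print(H * W * anchors_per_location)   # 321408 anchors per sample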
/pcdet/models/dense_heads/point_head_box.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from ...utils import box_coder_utils, box_utils
4 | from .point_head_template import PointHeadTemplate
5 |
6 |
7 | class PointHeadBox(PointHeadTemplate):
8 | """
9 | A simple point-based segmentation head, which is used for PointRCNN.
10 | Reference Paper: https://arxiv.org/abs/1812.04244
11 | PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud
12 | """
13 | def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):
14 | super().__init__(model_cfg=model_cfg, num_class=num_class)
15 | self.predict_boxes_when_training = predict_boxes_when_training
16 | self.cls_layers = self.make_fc_layers(
17 | fc_cfg=self.model_cfg.CLS_FC,
18 | input_channels=input_channels,
19 | output_channels=num_class
20 | )
21 |
22 | target_cfg = self.model_cfg.TARGET_CONFIG
23 | self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
24 | **target_cfg.BOX_CODER_CONFIG
25 | )
26 | self.box_layers = self.make_fc_layers(
27 | fc_cfg=self.model_cfg.REG_FC,
28 | input_channels=input_channels,
29 | output_channels=self.box_coder.code_size
30 | )
31 |
32 | def assign_targets(self, input_dict):
33 | """
34 | Args:
35 | input_dict:
36 | point_features: (N1 + N2 + N3 + ..., C)
37 | batch_size:
38 | point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
39 | gt_boxes (optional): (B, M, 8)
40 | Returns:
41 | point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
42 | point_part_labels: (N1 + N2 + N3 + ..., 3)
43 | """
44 | point_coords = input_dict['point_coords']
45 | gt_boxes = input_dict['gt_boxes']
46 | assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
47 | assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
48 |
49 | batch_size = gt_boxes.shape[0]
50 | extend_gt_boxes = box_utils.enlarge_box3d(
51 | gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
52 | ).view(batch_size, -1, gt_boxes.shape[-1])
53 | targets_dict = self.assign_stack_targets(
54 | points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
55 | set_ignore_flag=True, use_ball_constraint=False,
56 | ret_part_labels=False, ret_box_labels=True
57 | )
58 |
59 | return targets_dict
60 |
61 | def get_loss(self, tb_dict=None):
62 | tb_dict = {} if tb_dict is None else tb_dict
63 | point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
64 | point_loss_box, tb_dict_2 = self.get_box_layer_loss()
65 |
66 | point_loss = point_loss_cls + point_loss_box
67 | tb_dict.update(tb_dict_1)
68 | tb_dict.update(tb_dict_2)
69 | return point_loss, tb_dict
70 |
71 | def forward(self, batch_dict):
72 | """
73 | Args:
74 | batch_dict:
75 | batch_size:
76 | point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
77 | point_features_before_fusion: (N1 + N2 + N3 + ..., C)
78 | point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
79 | point_labels (optional): (N1 + N2 + N3 + ...)
80 | gt_boxes (optional): (B, M, 8)
81 | Returns:
82 | batch_dict:
83 | point_cls_scores: (N1 + N2 + N3 + ..., 1)
84 | point_part_offset: (N1 + N2 + N3 + ..., 3)
85 | """
86 | if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
87 | point_features = batch_dict['point_features_before_fusion']
88 | else:
89 | point_features = batch_dict['point_features']
90 | point_cls_preds = self.cls_layers(point_features) # (total_points, num_class)
91 | point_box_preds = self.box_layers(point_features) # (total_points, box_code_size)
92 |
93 | point_cls_preds_max, _ = point_cls_preds.max(dim=-1)
94 | batch_dict['point_cls_scores'] = torch.sigmoid(point_cls_preds_max)
95 |
96 | ret_dict = {'point_cls_preds': point_cls_preds,
97 | 'point_box_preds': point_box_preds}
98 | if self.training:
99 | targets_dict = self.assign_targets(batch_dict)
100 | ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
101 | ret_dict['point_box_labels'] = targets_dict['point_box_labels']
102 |
103 | if not self.training or self.predict_boxes_when_training:
104 | point_cls_preds, point_box_preds = self.generate_predicted_boxes(
105 | points=batch_dict['point_coords'][:, 1:4],
106 | point_cls_preds=point_cls_preds, point_box_preds=point_box_preds
107 | )
108 | batch_dict['batch_cls_preds'] = point_cls_preds
109 | batch_dict['batch_box_preds'] = point_box_preds
110 | batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
111 | batch_dict['cls_preds_normalized'] = False
112 |
113 | self.forward_ret_dict = ret_dict
114 |
115 | return batch_dict
116 |
--------------------------------------------------------------------------------
/pcdet/models/dense_heads/point_head_simple.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from ...utils import box_utils
4 | from .point_head_template import PointHeadTemplate
5 |
6 |
7 | class PointHeadSimple(PointHeadTemplate):
8 | """
9 | A simple point-based segmentation head, which is used for PV-RCNN keypoint segmentation.
10 | Reference Paper: https://arxiv.org/abs/1912.13192
11 | PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
12 | """
13 | def __init__(self, num_class, input_channels, model_cfg, **kwargs):
14 | super().__init__(model_cfg=model_cfg, num_class=num_class)
15 | self.cls_layers = self.make_fc_layers(
16 | fc_cfg=self.model_cfg.CLS_FC,
17 | input_channels=input_channels,
18 | output_channels=num_class
19 | )
20 |
21 | def assign_targets(self, input_dict):
22 | """
23 | Args:
24 | input_dict:
25 | point_features: (N1 + N2 + N3 + ..., C)
26 | batch_size:
27 | point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
28 | gt_boxes (optional): (B, M, 8)
29 | Returns:
30 | point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
31 | point_part_labels: (N1 + N2 + N3 + ..., 3)
32 | """
33 | point_coords = input_dict['point_coords']
34 | gt_boxes = input_dict['gt_boxes']
35 | assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
36 | assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape)
37 |
38 | batch_size = gt_boxes.shape[0]
39 | extend_gt_boxes = box_utils.enlarge_box3d(
40 | gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH
41 | ).view(batch_size, -1, gt_boxes.shape[-1])
42 | targets_dict = self.assign_stack_targets(
43 | points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes,
44 | set_ignore_flag=True, use_ball_constraint=False,
45 | ret_part_labels=False
46 | )
47 |
48 | return targets_dict
49 |
50 | def get_loss(self, tb_dict=None):
51 | tb_dict = {} if tb_dict is None else tb_dict
52 | point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
53 |
54 | point_loss = point_loss_cls
55 | tb_dict.update(tb_dict_1)
56 | return point_loss, tb_dict
57 |
58 | def forward(self, batch_dict):
59 | """
60 | Args:
61 | batch_dict:
62 | batch_size:
63 | point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
64 | point_features_before_fusion: (N1 + N2 + N3 + ..., C)
65 | point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
66 | point_labels (optional): (N1 + N2 + N3 + ...)
67 | gt_boxes (optional): (B, M, 8)
68 | Returns:
69 | batch_dict:
70 | point_cls_scores: (N1 + N2 + N3 + ..., 1)
71 | point_part_offset: (N1 + N2 + N3 + ..., 3)
72 | """
73 | if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
74 | point_features = batch_dict['point_features_before_fusion']
75 | else:
76 | point_features = batch_dict['point_features']
77 | point_cls_preds = self.cls_layers(point_features) # (total_points, num_class)
78 |
79 | ret_dict = {
80 | 'point_cls_preds': point_cls_preds,
81 | }
82 |
83 | point_cls_scores = torch.sigmoid(point_cls_preds)
84 | batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1)
85 |
86 | if self.training:
87 | targets_dict = self.assign_targets(batch_dict)
88 | ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
89 | self.forward_ret_dict = ret_dict
90 |
91 | return batch_dict
92 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/PartA2_net.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class PartA2Net(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks()
8 |
9 | def forward(self, batch_dict):
10 | for cur_module in self.module_list:
11 | batch_dict = cur_module(batch_dict)
12 |
13 | if self.training:
14 | loss, tb_dict, disp_dict = self.get_training_loss()
15 |
16 | ret_dict = {
17 | 'loss': loss
18 | }
19 | return ret_dict, tb_dict, disp_dict
20 | else:
21 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
22 | return pred_dicts, recall_dicts
23 |
24 | def get_training_loss(self):
25 | disp_dict = {}
26 | loss_rpn, tb_dict = self.dense_head.get_loss()
27 | loss_point, tb_dict = self.point_head.get_loss(tb_dict)
28 | loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
29 |
30 | loss = loss_rpn + loss_point + loss_rcnn
31 | return loss, tb_dict, disp_dict
32 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/__init__.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 | from .PartA2_net import PartA2Net
3 | from .point_rcnn import PointRCNN
4 | from .pointpillar import PointPillar
5 | from .pv_rcnn import PVRCNN
6 | from .second_net import SECONDNet
7 | from .second_net_iou import SECONDNetIoU
8 | from .caddn import CaDDN
9 | from .voxel_rcnn import VoxelRCNN
10 |
11 | __all__ = {
12 | 'Detector3DTemplate': Detector3DTemplate,
13 | 'SECONDNet': SECONDNet,
14 | 'PartA2Net': PartA2Net,
15 | 'PVRCNN': PVRCNN,
16 | 'PointPillar': PointPillar,
17 | 'PointRCNN': PointRCNN,
18 | 'SECONDNetIoU': SECONDNetIoU,
19 | 'CaDDN': CaDDN,
20 | 'VoxelRCNN': VoxelRCNN
21 | }
22 |
23 |
24 | def build_detector(model_cfg, num_class, dataset):
25 | """
26 | Build the detector specified by the network name in the config file.
27 | """
28 | model = __all__[model_cfg.NAME](
29 | model_cfg=model_cfg, num_class=num_class, dataset=dataset
30 | )
31 |
32 | return model
33 |
--------------------------------------------------------------------------------
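Note: build_detector is a plain dict registry: model_cfg.NAME picks the class out of __all__. A hedged usage sketch (the config object below is a placeholder, not a real config):

from easydict import EasyDict

model_cfg = EasyDict(NAME='PointPillar')  # plus the remaining model config fields
# model = build_detector(model_cfg, num_class=3, dataset=train_dataset)
# ...is equivalent to:
# model = PointPillar(model_cfg=model_cfg, num_class=3, dataset=train_dataset)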
/pcdet/models/detectors/caddn.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class CaDDN(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks()
8 |
9 | def forward(self, batch_dict):
10 | for cur_module in self.module_list:
11 | batch_dict = cur_module(batch_dict)
12 |
13 | if self.training:
14 | loss, tb_dict, disp_dict = self.get_training_loss()
15 |
16 | ret_dict = {
17 | 'loss': loss
18 | }
19 | return ret_dict, tb_dict, disp_dict
20 | else:
21 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
22 | return pred_dicts, recall_dicts
23 |
24 | def get_training_loss(self):
25 | disp_dict = {}
26 |
27 | loss_rpn, tb_dict_rpn = self.dense_head.get_loss()
28 | loss_depth, tb_dict_depth = self.vfe.get_loss()
29 |
30 | tb_dict = {
31 | 'loss_rpn': loss_rpn.item(),
32 | 'loss_depth': loss_depth.item(),
33 | **tb_dict_rpn,
34 | **tb_dict_depth
35 | }
36 |
37 | loss = loss_rpn + loss_depth
38 | return loss, tb_dict, disp_dict
39 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/point_rcnn.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class PointRCNN(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks()
8 |
9 | def forward(self, batch_dict):
10 | for cur_module in self.module_list:
11 | batch_dict = cur_module(batch_dict)
12 |
13 | if self.training:
14 | loss, tb_dict, disp_dict = self.get_training_loss()
15 |
16 | ret_dict = {
17 | 'loss': loss
18 | }
19 | return ret_dict, tb_dict, disp_dict
20 | else:
21 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
22 | return pred_dicts, recall_dicts
23 |
24 | def get_training_loss(self):
25 | disp_dict = {}
26 | loss_point, tb_dict = self.point_head.get_loss()
27 | loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
28 |
29 | loss = loss_point + loss_rcnn
30 | return loss, tb_dict, disp_dict
31 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/pointpillar.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class PointPillar(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | # demo.py calls build_network() in models/__init__.py, which returns the network class
8 | # here we call build_networks() from Detector3DTemplate
9 | # (note the extra 's'); it returns the list of module instances
10 | self.module_list = self.build_networks()
11 |
12 | def forward(self, batch_dict):
13 | # Detector3DTemplate has constructed all modules;
14 | # call them one by one in the order given by the model config
15 | for cur_module in self.module_list:
16 | batch_dict = cur_module(batch_dict)
17 | # in training mode, compute the loss
18 | if self.training:
19 | loss, tb_dict, disp_dict = self.get_training_loss()
20 |
21 | ret_dict = {
22 | 'loss': loss
23 | }
24 | return ret_dict, tb_dict, disp_dict
25 | # in test mode, post-process the predictions
26 | else:
27 | # pred_dicts: the prediction results
28 | # record_dict = {
29 | # 'pred_boxes': final_boxes,
30 | # 'pred_scores': final_scores,
31 | # 'pred_labels': final_labels
32 | # }
33 | # recall_dicts: recall statistics accumulated over all processed data
34 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
35 | return pred_dicts, recall_dicts
36 |
37 | def get_training_loss(self):
38 | disp_dict = {}
39 |
40 | loss_rpn, tb_dict = self.dense_head.get_loss()
41 | tb_dict = {
42 | 'loss_rpn': loss_rpn.item(),
43 | **tb_dict
44 | } # tb_dict now contains loss_rpn, cls_loss, reg_loss and rpn_loss
45 |
46 | loss = loss_rpn
47 | return loss, tb_dict, disp_dict
48 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/pv_rcnn.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class PVRCNN(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks()
8 |
9 | def forward(self, batch_dict):
10 | for cur_module in self.module_list:
11 | batch_dict = cur_module(batch_dict)
12 |
13 | if self.training:
14 | loss, tb_dict, disp_dict = self.get_training_loss()
15 |
16 | ret_dict = {
17 | 'loss': loss
18 | }
19 | return ret_dict, tb_dict, disp_dict
20 | else:
21 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
22 | return pred_dicts, recall_dicts
23 |
24 | def get_training_loss(self):
25 | disp_dict = {}
26 | loss_rpn, tb_dict = self.dense_head.get_loss()
27 | loss_point, tb_dict = self.point_head.get_loss(tb_dict)
28 | loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
29 |
30 | loss = loss_rpn + loss_point + loss_rcnn
31 | return loss, tb_dict, disp_dict
32 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/second_net.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class SECONDNet(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks()
8 |
9 | def forward(self, batch_dict):
10 | for cur_module in self.module_list:
11 | batch_dict = cur_module(batch_dict)
12 |
13 | if self.training:
14 | loss, tb_dict, disp_dict = self.get_training_loss()
15 |
16 | ret_dict = {
17 | 'loss': loss
18 | }
19 | return ret_dict, tb_dict, disp_dict
20 | else:
21 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
22 | return pred_dicts, recall_dicts
23 |
24 | def get_training_loss(self):
25 | disp_dict = {}
26 |
27 | loss_rpn, tb_dict = self.dense_head.get_loss()
28 | tb_dict = {
29 | 'loss_rpn': loss_rpn.item(),
30 | **tb_dict
31 | }
32 |
33 | loss = loss_rpn
34 | return loss, tb_dict, disp_dict
35 |
--------------------------------------------------------------------------------
/pcdet/models/detectors/voxel_rcnn.py:
--------------------------------------------------------------------------------
1 | from .detector3d_template import Detector3DTemplate
2 |
3 |
4 | class VoxelRCNN(Detector3DTemplate):
5 | def __init__(self, model_cfg, num_class, dataset):
6 | super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
7 | self.module_list = self.build_networks() # instances of each module
8 |
9 | def forward(self, batch_dict):
10 | # call each module's forward in turn
11 | for cur_module in self.module_list:
12 | batch_dict = cur_module(batch_dict)
13 |
14 | # in training mode, compute the loss
15 | if self.training:
16 | loss, tb_dict, disp_dict = self.get_training_loss()
17 |
18 | ret_dict = {
19 | 'loss': loss
20 | }
21 | return ret_dict, tb_dict, disp_dict
22 | # in test mode, post-process the predictions
23 | else:
24 | """
25 | pred_dicts = {
26 | 'pred_boxes': final_boxes,
27 | 'pred_scores': final_scores,
28 | 'pred_labels': final_labels
29 | }
30 | recall_dicts: recall statistics accumulated over all processed data
31 | """
32 | pred_dicts, recall_dicts = self.post_processing(batch_dict)
33 | return pred_dicts, recall_dicts
34 |
35 | def get_training_loss(self):
36 | disp_dict = {}
37 | loss = 0
38 | # compute the RPN loss; tb_dict is a dict containing {'rpn_loss_cls', 'rpn_loss_loc', 'rpn_loss_dir'}
39 | loss_rpn, tb_dict = self.dense_head.get_loss()
40 | # rcnn loss
41 | loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
42 |
43 | loss = loss + loss_rpn + loss_rcnn
44 | return loss, tb_dict, disp_dict
45 |
--------------------------------------------------------------------------------
/pcdet/models/model_utils/basic_block_2d.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class BasicBlock2D(nn.Module):
5 |
6 | def __init__(self, in_channels, out_channels, **kwargs):
7 | """
8 | Initializes convolutional block
9 | Args:
10 | in_channels: int, Number of input channels
11 | out_channels: int, Number of output channels
12 | **kwargs: Dict, Extra arguments for nn.Conv2d
13 | """
14 | super().__init__()
15 | self.in_channels = in_channels
16 | self.out_channels = out_channels
17 | self.conv = nn.Conv2d(in_channels=in_channels,
18 | out_channels=out_channels,
19 | **kwargs)
20 | self.bn = nn.BatchNorm2d(out_channels)
21 | self.relu = nn.ReLU(inplace=True)
22 |
23 | def forward(self, features):
24 | """
25 | Applies convolutional block
26 | Args:
27 | features: (B, C_in, H, W), Input features
28 | Returns:
29 | x: (B, C_out, H, W), Output features
30 | """
31 | x = self.conv(features)
32 | x = self.bn(x)
33 | x = self.relu(x)
34 | return x
35 |
--------------------------------------------------------------------------------
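Note: in DepthFFN above, this block is built from model_cfg.CHANNEL_REDUCE, whose keys are forwarded to nn.Conv2d. A hedged usage example (the channel sizes here are made up for illustration):

import torch

block = BasicBlock2D(in_channels=256, out_channels=64, kernel_size=1, stride=1, bias=False)
x = block(torch.randn(2, 256, 94, 311))  # -> (2, 64, 94, 311)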
/pcdet/models/model_utils/model_nms_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from ...ops.iou3d_nms import iou3d_nms_utils
4 |
5 |
6 | def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
7 | # 1. First filter out boxes below the score threshold
8 | src_box_scores = box_scores
9 | if score_thresh is not None:
10 | scores_mask = (box_scores >= score_thresh)
11 | box_scores = box_scores[scores_mask]
12 | box_preds = box_preds[scores_mask]
13 |
14 | selected = []
15 | if box_scores.shape[0] > 0:
16 | # if any boxes remain, take the top-k by score
17 | box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
18 | boxes_for_nms = box_preds[indices]
19 | # call nms_gpu from iou3d_nms_utils; it returns the indices of the kept boxes, and selected_scores = None
20 | # the boxes passed in are already sorted by score, yet nms_gpu sorts them again (somewhat redundant)
21 | keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
22 | boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
23 | )
24 | # use the returned indices to recover the box indices
25 | selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
26 |
27 | if score_thresh is not None:
28 | # if a score threshold was used, scores_mask gives the positions of box_scores within src_box_scores, i.e. the original indices
29 | original_idxs = scores_mask.nonzero().view(-1)
30 | # selected indexes into box_scores; after this step, selected indexes into src_box_scores
31 | selected = original_idxs[selected]
32 | # in short: double indexing, a trick worth learning
33 | return selected, src_box_scores[selected]
34 |
35 |
36 | def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
37 | """
38 | Args:
39 | cls_scores: (N, num_class)
40 | box_preds: (N, 7 + C)
41 | nms_config:
42 | score_thresh:
43 |
44 | Returns:
45 |
46 | """
47 | pred_scores, pred_labels, pred_boxes = [], [], []
48 | # run NMS per class
49 | for k in range(cls_scores.shape[1]):
50 | if score_thresh is not None:
51 | # if a score threshold is given, first filter out low-scoring boxes
52 | scores_mask = (cls_scores[:, k] >= score_thresh)
53 | box_scores = cls_scores[scores_mask, k]
54 | cur_box_preds = box_preds[scores_mask]
55 | else:
56 | box_scores = cls_scores[:, k]
57 | cur_box_preds = box_preds
58 |
59 | selected = []
60 | if box_scores.shape[0] > 0:
61 | # take the top-k scores and their indices
62 | box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
63 | # gather the corresponding top-k predicted boxes
64 | boxes_for_nms = cur_box_preds[indices]
65 | # nms_config.NMS_TYPE: nms_gpu
66 | # input: boxes, box scores and the NMS threshold
67 | # output: indices of the kept boxes
68 | keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
69 | boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
70 | )
71 | # keep the first NMS_POST_MAXSIZE=500 as the selected boxes
72 | selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
73 | # gather the selected boxes
74 | pred_scores.append(box_scores[selected])
75 | pred_labels.append(box_scores.new_ones(len(selected)).long() * k) # all predicted labels are k
76 | pred_boxes.append(cur_box_preds[selected])
77 | # concatenate the predictions across classes
78 | pred_scores = torch.cat(pred_scores, dim=0)
79 | pred_labels = torch.cat(pred_labels, dim=0)
80 | pred_boxes = torch.cat(pred_boxes, dim=0)
81 |
82 | return pred_scores, pred_labels, pred_boxes
83 |
--------------------------------------------------------------------------------
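Note: the double-indexing trick at the end of class_agnostic_nms, in isolation:

import torch

src_scores = torch.tensor([0.9, 0.2, 0.8, 0.7])
scores_mask = src_scores >= 0.5                  # [True, False, True, True]
original_idxs = scores_mask.nonzero().view(-1)   # tensor([0, 2, 3]): positions in src_scores
selected = torch.tensor([0, 2])                  # chosen within the filtered set
print(original_idxs[selected])                   # tensor([0, 3]): back in src_scores coordinates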
/pcdet/models/roi_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .partA2_head import PartA2FCHead
2 | from .pointrcnn_head import PointRCNNHead
3 | from .pvrcnn_head import PVRCNNHead
4 | from .second_head import SECONDHead
5 | from .voxelrcnn_head import VoxelRCNNHead
6 | from .roi_head_template import RoIHeadTemplate
7 |
8 |
9 | __all__ = {
10 | 'RoIHeadTemplate': RoIHeadTemplate,
11 | 'PartA2FCHead': PartA2FCHead,
12 | 'PVRCNNHead': PVRCNNHead,
13 | 'SECONDHead': SECONDHead,
14 | 'PointRCNNHead': PointRCNNHead,
15 | 'VoxelRCNNHead': VoxelRCNNHead
16 | }
17 |
--------------------------------------------------------------------------------
/pcdet/ops/iou3d_nms/iou3d_nms_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | 3D IoU Calculation and Rotated NMS
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | """
6 | import torch
7 |
8 | from ...utils import common_utils
9 | from . import iou3d_nms_cuda
10 |
11 |
12 | def boxes_bev_iou_cpu(boxes_a, boxes_b):
13 | """
14 | Args:
15 | boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
16 | boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
17 |
18 | Returns:
19 | ans_iou: (N, M)
20 | """
21 | boxes_a, is_numpy = common_utils.check_numpy_to_torch(boxes_a)
22 | boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b)
23 | assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors'
24 | assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
25 | ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
26 | iou3d_nms_cuda.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
27 |
28 | return ans_iou.numpy() if is_numpy else ans_iou
29 |
30 |
31 | def boxes_iou_bev(boxes_a, boxes_b):
32 | """
33 | Args:
34 | boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
35 | boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
36 |
37 | Returns:
38 | ans_iou: (N, M)
39 | """
40 | assert boxes_a.shape[1] == boxes_b.shape[1] == 7
41 | ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()
42 |
43 | iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
44 |
45 | return ans_iou
46 |
47 |
48 | def boxes_iou3d_gpu(boxes_a, boxes_b):
49 | """
50 | Args:
51 | boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
52 | boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
53 |
54 | Returns:
55 | ans_iou: (N, M)
56 | """
57 | assert boxes_a.shape[1] == boxes_b.shape[1] == 7
58 |
59 | # height overlap
60 | boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) # (N, 1) --> (13, 1)
61 | boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
62 | boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1) # (1, M)--> (1, 2)
63 | boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)
64 |
65 | # bev overlap
66 | # allocate the BEV overlap matrix
67 | overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M) --> (13, 2)
68 | # the CUDA kernel computes the rotated BEV overlap
69 | iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)
70 | # maximum of the height minima (uses broadcasting)
71 | max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) # (N, M) --> (13, 2)
72 | # minimum of the height maxima
73 | min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) # (N, M) --> (13, 2)
74 | overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) # clamp to non-negative
75 |
76 | # 3d iou
77 | overlaps_3d = overlaps_bev * overlaps_h # volume of the 3D intersection
78 |
79 | vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) # (N, 1) --> (13, 1)
80 | vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1) # (1, M)--> (1, 2)
81 |
82 | iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) # clamp the denominator to avoid division by zero
83 |
84 | return iou3d
85 |
86 |
87 | def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
88 | """
89 | :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
90 | :param scores: (N)
91 | :param thresh:
92 | :return:
93 | """
94 | assert boxes.shape[1] == 7
95 | # sort the scores in descending order and take the sorted indices
96 | # dim=0 sorts down the columns, dim=1 across the rows (the default is the last dim)
97 | # since the incoming scores are already sorted, order is simply [0, 1, 2, ...]
98 | order = scores.sort(0, descending=True)[1]
99 | # if a pre-NMS cap on the number of boxes (4096) is set, keep only that many indices
100 | if pre_maxsize is not None:
101 | order = order[:pre_maxsize]
102 |
103 | # gather the boxes that enter NMS
104 | boxes = boxes[order].contiguous()
105 | keep = torch.LongTensor(boxes.size(0)) # allocate a 4096-dim index buffer
106 | # run the CUDA NMS kernel
107 | # keep: indices of the boxes to keep
108 | # num_out: the number of boxes kept
109 | num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh)
110 | # finally, only the first num_out entries are taken because keep was allocated with length 4096 up front
111 | return order[keep[:num_out].cuda()].contiguous(), None
112 |
113 |
114 | def nms_normal_gpu(boxes, scores, thresh, **kwargs):
115 | """
116 | :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
117 | :param scores: (N)
118 | :param thresh:
119 | :return:
120 | """
121 | assert boxes.shape[1] == 7
122 | order = scores.sort(0, descending=True)[1]
123 |
124 | boxes = boxes[order].contiguous()
125 |
126 | keep = torch.LongTensor(boxes.size(0))
127 | num_out = iou3d_nms_cuda.nms_normal_gpu(boxes, keep, thresh)
128 | return order[keep[:num_out].cuda()].contiguous(), None
129 |
--------------------------------------------------------------------------------
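Note: boxes_iou3d_gpu composes a rotated BEV overlap (from the CUDA kernel) with a 1-D height intersection. For heading = 0 the BEV part degenerates to a rectangle intersection, so the formula can be sanity-checked in pure PyTorch; this sketch is only valid for axis-aligned boxes:

import torch

def iou3d_axis_aligned(a, b):
    # a: (N, 7), b: (M, 7) as [x, y, z, dx, dy, dz, heading], headings assumed 0
    def interval_overlap(c_a, d_a, c_b, d_b):
        lo = torch.max((c_a - d_a / 2)[:, None], (c_b - d_b / 2)[None])
        hi = torch.min((c_a + d_a / 2)[:, None], (c_b + d_b / 2)[None])
        return (hi - lo).clamp(min=0)
    inter = (interval_overlap(a[:, 0], a[:, 3], b[:, 0], b[:, 3])
             * interval_overlap(a[:, 1], a[:, 4], b[:, 1], b[:, 4])
             * interval_overlap(a[:, 2], a[:, 5], b[:, 2], b[:, 5]))
    vol_a = (a[:, 3] * a[:, 4] * a[:, 5])[:, None]
    vol_b = (b[:, 3] * b[:, 4] * b[:, 5])[None]
    return inter / (vol_a + vol_b - inter).clamp(min=1e-6)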
/pcdet/ops/iou3d_nms/src/iou3d_cpu.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_CPU_H
2 | #define IOU3D_CPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor);
10 |
11 | #endif
12 |
--------------------------------------------------------------------------------
/pcdet/ops/iou3d_nms/src/iou3d_nms.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_NMS_H
2 | #define IOU3D_NMS_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap);
10 | int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou);
11 | int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
12 | int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 | #include <vector>
4 | #include <cuda.h>
5 | #include <cuda_runtime_api.h>
6 |
7 | // include our own headers
8 | #include "iou3d_cpu.h"
9 | #include "iou3d_nms.h"
10 |
11 |
12 | // bind the functions with PYBIND11_MODULE, which is provided via torch/extension.h
13 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
14 | // first argument: the function name as called from Python
15 | // second argument: the function declared in the .h file
16 | // third argument: the docstring shown by Python's help()
17 | m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap");
18 | m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
19 | m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
20 | m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
21 | m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou");
22 | }
23 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <THC/THC.h>
11 | #include <cuda.h>
12 | #include <cuda_runtime_api.h>
13 | #include "ball_query_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 | #define CHECK_CUDA(x) do { \
18 | if (!x.type().is_cuda()) { \
19 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
20 | exit(-1); \
21 | } \
22 | } while (0)
23 | #define CHECK_CONTIGUOUS(x) do { \
24 | if (!x.is_contiguous()) { \
25 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
26 | exit(-1); \
27 | } \
28 | } while (0)
29 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
30 |
31 |
32 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
33 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) {
34 | CHECK_INPUT(new_xyz_tensor);
35 | CHECK_INPUT(xyz_tensor);
36 | const float *new_xyz = new_xyz_tensor.data<float>();
37 | const float *xyz = xyz_tensor.data<float>();
38 | int *idx = idx_tensor.data<int>();
39 |
40 | ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx);
41 | return 1;
42 | }
43 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 | #include <math.h>
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 |
11 | #include "ball_query_gpu.h"
12 | #include "cuda_utils.h"
13 |
14 |
15 | __global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
16 | const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
17 | // new_xyz: (B, M, 3)
18 | // xyz: (B, N, 3)
19 | // output:
20 | // idx: (B, M, nsample)
21 | int bs_idx = blockIdx.y;
22 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
23 | if (bs_idx >= b || pt_idx >= m) return;
24 |
25 | new_xyz += bs_idx * m * 3 + pt_idx * 3;
26 | xyz += bs_idx * n * 3;
27 | idx += bs_idx * m * nsample + pt_idx * nsample;
28 |
29 | float radius2 = radius * radius;
30 | float new_x = new_xyz[0];
31 | float new_y = new_xyz[1];
32 | float new_z = new_xyz[2];
33 |
34 | int cnt = 0;
35 | for (int k = 0; k < n; ++k) {
36 | float x = xyz[k * 3 + 0];
37 | float y = xyz[k * 3 + 1];
38 | float z = xyz[k * 3 + 2];
39 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
40 | if (d2 < radius2){
41 | if (cnt == 0){
42 | for (int l = 0; l < nsample; ++l) {
43 | idx[l] = k;
44 | }
45 | }
46 | idx[cnt] = k;
47 | ++cnt;
48 | if (cnt >= nsample) break;
49 | }
50 | }
51 | }
52 |
53 |
54 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
55 | const float *new_xyz, const float *xyz, int *idx) {
56 | // new_xyz: (B, M, 3)
57 | // xyz: (B, N, 3)
58 | // output:
59 | // idx: (B, M, nsample)
60 |
61 | cudaError_t err;
62 |
63 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
64 | dim3 threads(THREADS_PER_BLOCK);
65 |
66 | ball_query_kernel_fast<<<blocks, threads>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
67 | // cudaDeviceSynchronize(); // for using printf in kernel function
68 | err = cudaGetLastError();
69 | if (cudaSuccess != err) {
70 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
71 | exit(-1);
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
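Note: each kernel thread above handles one (batch, query point) pair: on the first in-radius hit it fills every output slot with that index, then overwrites slots until nsample neighbours are found. A slow PyTorch reference with the same output contract:

import torch

def ball_query_reference(new_xyz, xyz, radius, nsample):
    # new_xyz: (B, M, 3) query centers; xyz: (B, N, 3) points -> idx: (B, M, nsample)
    B, M, _ = new_xyz.shape
    idx = torch.zeros(B, M, nsample, dtype=torch.long)
    d2 = ((new_xyz[:, :, None, :] - xyz[:, None, :, :]) ** 2).sum(-1)  # (B, M, N)
    for b in range(B):
        for p in range(M):
            hits = (d2[b, p] < radius ** 2).nonzero().view(-1)
            if len(hits) > 0:
                idx[b, p] = hits[0]                          # pad all slots with the first hit
                idx[b, p, :min(nsample, len(hits))] = hits[:nsample]
    return idx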
/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _BALL_QUERY_GPU_H
2 | #define _BALL_QUERY_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
10 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor);
11 |
12 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
13 | const float *xyz, const float *new_xyz, int *idx);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _CUDA_UTILS_H
2 | #define _CUDA_UTILS_H
3 |
4 | #include <cmath>
5 |
6 | #define TOTAL_THREADS 1024
7 | #define THREADS_PER_BLOCK 256
8 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
9 |
10 | inline int opt_n_threads(int work_size) {
11 | const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
12 |
13 | return max(min(1 << pow_2, TOTAL_THREADS), 1);
14 | }
15 | #endif
16 |
--------------------------------------------------------------------------------
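Note: DIVUP is ceiling division; it sizes the grid so every element gets a thread even when the work size is not a multiple of the block size. The same in Python:

def divup(m, n):
    return m // n + (1 if m % n else 0)

assert divup(1000, 256) == 4   # 4 blocks of 256 threads cover 1000 elements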
/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <cuda.h>
10 | #include <cuda_runtime_api.h>
11 | #include <vector>
12 | #include <THC/THC.h>
13 | #include "group_points_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 |
18 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
19 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
20 |
21 | float *grad_points = grad_points_tensor.data<float>();
22 | const int *idx = idx_tensor.data<int>();
23 | const float *grad_out = grad_out_tensor.data<float>();
24 |
25 | group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points);
26 | return 1;
27 | }
28 |
29 |
30 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
31 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) {
32 |
33 | const float *points = points_tensor.data<float>();
34 | const int *idx = idx_tensor.data<int>();
35 | float *out = out_tensor.data<float>();
36 |
37 | group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out);
38 | return 1;
39 | }
40 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 | #include <stdio.h>
8 | #include <stdlib.h>
9 |
10 | #include "cuda_utils.h"
11 | #include "group_points_gpu.h"
12 |
13 |
14 | __global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample,
15 | const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) {
16 | // grad_out: (B, C, npoints, nsample)
17 | // idx: (B, npoints, nsample)
18 | // output:
19 | // grad_points: (B, C, N)
20 | int bs_idx = blockIdx.z;
21 | int c_idx = blockIdx.y;
22 | int index = blockIdx.x * blockDim.x + threadIdx.x;
23 | int pt_idx = index / nsample;
24 | if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
25 |
26 | int sample_idx = index % nsample;
27 | grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
28 | idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
29 |
30 | atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]);
31 | }
32 |
33 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
34 | const float *grad_out, const int *idx, float *grad_points) {
35 | // grad_out: (B, C, npoints, nsample)
36 | // idx: (B, npoints, nsample)
37 | // output:
38 | // grad_points: (B, C, N)
39 | cudaError_t err;
40 | dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
41 | dim3 threads(THREADS_PER_BLOCK);
42 |
43 | group_points_grad_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, nsample, grad_out, idx, grad_points);
44 |
45 | err = cudaGetLastError();
46 | if (cudaSuccess != err) {
47 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
48 | exit(-1);
49 | }
50 | }
51 |
52 |
53 | __global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample,
54 | const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
55 | // points: (B, C, N)
56 | // idx: (B, npoints, nsample)
57 | // output:
58 | // out: (B, C, npoints, nsample)
59 | int bs_idx = blockIdx.z;
60 | int c_idx = blockIdx.y;
61 | int index = blockIdx.x * blockDim.x + threadIdx.x;
62 | int pt_idx = index / nsample;
63 | if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
64 |
65 | int sample_idx = index % nsample;
66 |
67 | idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
68 | int in_idx = bs_idx * c * n + c_idx * n + idx[0];
69 | int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
70 |
71 | out[out_idx] = points[in_idx];
72 | }
73 |
74 |
75 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
76 | const float *points, const int *idx, float *out) {
77 | // points: (B, C, N)
78 | // idx: (B, npoints, nsample)
79 | // output:
80 | // out: (B, C, npoints, nsample)
81 | cudaError_t err;
82 | dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
83 | dim3 threads(THREADS_PER_BLOCK);
84 |
85 | group_points_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, nsample, points, idx, out);
86 | // cudaDeviceSynchronize(); // for using printf in kernel function
87 | err = cudaGetLastError();
88 | if (cudaSuccess != err) {
89 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
90 | exit(-1);
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
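Note: the forward kernel implements out[b, c, p, s] = points[b, c, idx[b, p, s]], and the backward kernel scatter-adds gradients back through the same indices with atomicAdd. The forward gather in PyTorch:

import torch

B, C, N, npoints, nsample = 2, 4, 100, 16, 8
points = torch.randn(B, C, N)
idx = torch.randint(0, N, (B, npoints, nsample))
flat = idx.view(B, 1, -1).expand(-1, C, -1)               # (B, C, npoints*nsample)
out = points.gather(2, flat).view(B, C, npoints, nsample)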
/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _GROUP_POINTS_GPU_H
2 | #define _GROUP_POINTS_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <cuda.h>
6 | #include <cuda_runtime_api.h>
7 | #include <vector>
8 |
9 |
10 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
11 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
12 |
13 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
14 | const float *points, const int *idx, float *out);
15 |
16 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
18 |
19 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
20 | const float *grad_out, const int *idx, float *grad_points);
21 |
22 | #endif
23 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point interpolation, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <THC/THC.h>
11 | #include <math.h>
12 | #include <stdio.h>
13 | #include <stdlib.h>
14 | #include <cuda.h>
15 | #include <cuda_runtime_api.h>
16 | #include "interpolate_gpu.h"
17 |
18 | extern THCState *state;
19 |
20 |
21 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
22 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) {
23 | const float *unknown = unknown_tensor.data<float>();
24 | const float *known = known_tensor.data<float>();
25 | float *dist2 = dist2_tensor.data<float>();
26 | int *idx = idx_tensor.data<int>();
27 |
28 | three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx);
29 | }
30 |
31 |
32 | void three_interpolate_wrapper_fast(int b, int c, int m, int n,
33 | at::Tensor points_tensor,
34 | at::Tensor idx_tensor,
35 | at::Tensor weight_tensor,
36 | at::Tensor out_tensor) {
37 |
38 | const float *points = points_tensor.data<float>();
39 | const float *weight = weight_tensor.data<float>();
40 | float *out = out_tensor.data<float>();
41 | const int *idx = idx_tensor.data<int>();
42 |
43 | three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out);
44 | }
45 |
46 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m,
47 | at::Tensor grad_out_tensor,
48 | at::Tensor idx_tensor,
49 | at::Tensor weight_tensor,
50 | at::Tensor grad_points_tensor) {
51 |
52 | const float *grad_out = grad_out_tensor.data<float>();
53 | const float *weight = weight_tensor.data<float>();
54 | float *grad_points = grad_points_tensor.data<float>();
55 | const int *idx = idx_tensor.data<int>();
56 |
57 | three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points);
58 | }
59 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _INTERPOLATE_GPU_H
2 | #define _INTERPOLATE_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 |
10 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
11 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);
12 |
13 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
14 | const float *known, float *dist2, int *idx);
15 |
16 |
17 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor,
18 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);
19 |
20 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
21 | const float *points, const int *idx, const float *weight, float *out);
22 |
23 |
24 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor,
25 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor);
26 |
27 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
28 | const int *idx, const float *weight, float *grad_points);
29 |
30 | #endif
31 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 |
4 | #include "ball_query_gpu.h"
5 | #include "group_points_gpu.h"
6 | #include "sampling_gpu.h"
7 | #include "interpolate_gpu.h"
8 |
9 |
10 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
11 | m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast");
12 |
13 | m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast");
14 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast");
15 |
16 | m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast");
17 | m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast");
18 |
19 | m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");
20 |
21 | m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast");
22 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast");
23 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast");
24 | }
25 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2018.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <ATen/cuda/CUDAContext.h>
10 | #include <vector>
11 | #include <THC/THC.h>
12 |
13 | #include "sampling_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 |
18 | int gather_points_wrapper_fast(int b, int c, int n, int npoints,
19 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){
20 |     const float *points = points_tensor.data<float>();
21 |     const int *idx = idx_tensor.data<int>();
22 |     float *out = out_tensor.data<float>();
23 |
24 | gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out);
25 | return 1;
26 | }
27 |
28 |
29 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
30 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
31 |
32 |     const float *grad_out = grad_out_tensor.data<float>();
33 |     const int *idx = idx_tensor.data<int>();
34 |     float *grad_points = grad_points_tensor.data<float>();
35 |
36 | gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points);
37 | return 1;
38 | }
39 |
40 |
41 | int furthest_point_sampling_wrapper(int b, int n, int m,
42 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {
43 |
44 |     const float *points = points_tensor.data<float>();
45 |     float *temp = temp_tensor.data<float>();
46 |     int *idx = idx_tensor.data<int>();
47 |
48 | furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
49 | return 1;
50 | }
51 |
--------------------------------------------------------------------------------
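Note: `furthest_point_sampling_wrapper` drives classic farthest point sampling; the `temp` buffer holds each point's running distance to the already-selected set, and every iteration picks the point that maximizes it. A rough NumPy equivalent for one batch element (an illustrative sketch, mirroring the CUDA kernel's convention of starting from index 0):

```python
import numpy as np

def furthest_point_sampling_ref(points, m):
    """points: (n, 3) -> indices of m sampled points."""
    n = points.shape[0]
    temp = np.full(n, 1e10)            # running min distance to the chosen set
    idxs = np.zeros(m, dtype=np.int32)
    farthest = 0
    for i in range(m):
        idxs[i] = farthest
        d2 = ((points - points[farthest]) ** 2).sum(-1)
        temp = np.minimum(temp, d2)    # update distances against the new pick
        farthest = int(temp.argmax())  # next pick: farthest from the set
    return idxs
```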
/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _SAMPLING_GPU_H
2 | #define _SAMPLING_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <vector>
7 |
8 |
9 | int gather_points_wrapper_fast(int b, int c, int n, int npoints,
10 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
11 |
12 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
13 | const float *points, const int *idx, float *out);
14 |
15 |
16 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
18 |
19 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
20 | const float *grad_out, const int *idx, float *grad_points);
21 |
22 |
23 | int furthest_point_sampling_wrapper(int b, int n, int m,
24 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);
25 |
26 | void furthest_point_sampling_kernel_launcher(int b, int n, int m,
27 | const float *dataset, float *temp, int *idxs);
28 |
29 | #endif
30 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <THC/THC.h>
11 | #include <cuda.h>
12 | #include <cuda_runtime_api.h>
13 | #include "ball_query_gpu.h"
14 |
15 | extern THCState *state;
16 |
17 | #define CHECK_CUDA(x) do { \
18 | if (!x.type().is_cuda()) { \
19 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
20 | exit(-1); \
21 | } \
22 | } while (0)
23 | #define CHECK_CONTIGUOUS(x) do { \
24 | if (!x.is_contiguous()) { \
25 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
26 | exit(-1); \
27 | } \
28 | } while (0)
29 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
30 |
31 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample,
32 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
33 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) {
34 | CHECK_INPUT(new_xyz_tensor);
35 | CHECK_INPUT(xyz_tensor);
36 | CHECK_INPUT(new_xyz_batch_cnt_tensor);
37 | CHECK_INPUT(xyz_batch_cnt_tensor);
38 |
39 |     const float *new_xyz = new_xyz_tensor.data<float>();
40 |     const float *xyz = xyz_tensor.data<float>();
41 |     const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>();
42 |     const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>();
43 |     int *idx = idx_tensor.data<int>();
44 |
45 | ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx);
46 | return 1;
47 | }
48 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <math.h>
9 | #include <stdio.h>
10 | #include <stdlib.h>
11 |
12 | #include "ball_query_gpu.h"
13 | #include "cuda_utils.h"
14 |
15 |
16 | __global__ void ball_query_kernel_stack(int B, int M, float radius, int nsample, \
17 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx) {
18 | // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features
19 | // :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
20 | // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query
21 | // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
22 | // output:
23 | // idx: (M, nsample)
24 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
25 | if (pt_idx >= M) return;
26 |
27 | int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
28 | for (int k = 1; k < B; k++){
29 | if (pt_idx < pt_cnt) break;
30 | pt_cnt += new_xyz_batch_cnt[k];
31 | bs_idx = k;
32 | }
33 |
34 | int xyz_batch_start_idx = 0;
35 | for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
36 | // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx += new_xyz_batch_cnt[k];
37 |
38 | new_xyz += pt_idx * 3;
39 | xyz += xyz_batch_start_idx * 3;
40 | idx += pt_idx * nsample;
41 |
42 | float radius2 = radius * radius;
43 | float new_x = new_xyz[0];
44 | float new_y = new_xyz[1];
45 | float new_z = new_xyz[2];
46 | int n = xyz_batch_cnt[bs_idx];
47 |
48 | int cnt = 0;
49 | for (int k = 0; k < n; ++k) {
50 | float x = xyz[k * 3 + 0];
51 | float y = xyz[k * 3 + 1];
52 | float z = xyz[k * 3 + 2];
53 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
54 | if (d2 < radius2){
55 | if (cnt == 0){
56 | for (int l = 0; l < nsample; ++l) {
57 | idx[l] = k;
58 | }
59 | }
60 | idx[cnt] = k;
61 | ++cnt;
62 | if (cnt >= nsample) break;
63 | }
64 | }
65 | if (cnt == 0) idx[0] = -1;
66 | }
67 |
68 |
69 | void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample,
70 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx){
71 | // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features
72 | // :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
73 | // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query
74 | // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
75 | // output:
76 | // idx: (M, nsample)
77 |
78 | cudaError_t err;
79 |
80 | dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
81 | dim3 threads(THREADS_PER_BLOCK);
82 |
83 |     ball_query_kernel_stack<<<blocks, threads>>>(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx);
84 | // cudaDeviceSynchronize(); // for using printf in kernel function
85 | err = cudaGetLastError();
86 | if (cudaSuccess != err) {
87 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
88 | exit(-1);
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
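Note the padding convention in the kernel above: the first in-radius point is copied into all `nsample` slots before real neighbors overwrite them, so under-full balls repeat their first neighbor, and a fully empty ball is flagged via `idx[0] = -1` (masked out later on the Python side). A NumPy sketch of that convention for a single query center (a hypothetical helper, not part of the source):

```python
import numpy as np

def ball_query_one_center_ref(center, xyz, radius, nsample):
    """center: (3,), xyz: (n, 3) -> (nsample,) int32 indices."""
    idx = np.zeros(nsample, dtype=np.int32)
    cnt = 0
    for k in range(xyz.shape[0]):
        if ((xyz[k] - center) ** 2).sum() < radius ** 2:
            if cnt == 0:
                idx[:] = k      # pre-fill every slot with the first hit
            idx[cnt] = k
            cnt += 1
            if cnt >= nsample:
                break
    if cnt == 0:
        idx[0] = -1             # empty-ball marker
    return idx
```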
/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #ifndef _STACK_BALL_QUERY_GPU_H
9 | #define _STACK_BALL_QUERY_GPU_H
10 |
11 | #include <torch/serialize/tensor.h>
12 | #include <vector>
13 | #include <cuda.h>
14 | #include <cuda_runtime_api.h>
15 |
16 | int ball_query_wrapper_stack(int B, int M, float radius, int nsample,
17 | at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
18 | at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor);
19 |
20 |
21 | void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample,
22 | const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx);
23 |
24 |
25 | #endif
26 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _STACK_CUDA_UTILS_H
2 | #define _STACK_CUDA_UTILS_H
3 |
4 | #include <cmath>
5 |
6 | #define THREADS_PER_BLOCK 256
7 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
8 |
9 | #endif
10 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <cuda.h>
10 | #include <cuda_runtime_api.h>
11 | #include <vector>
12 | #include <THC/THC.h>
13 | #include "group_points_gpu.h"
14 |
15 | extern THCState *state;
16 | #define CHECK_CUDA(x) do { \
17 | if (!x.type().is_cuda()) { \
18 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
19 | exit(-1); \
20 | } \
21 | } while (0)
22 | #define CHECK_CONTIGUOUS(x) do { \
23 | if (!x.is_contiguous()) { \
24 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
25 | exit(-1); \
26 | } \
27 | } while (0)
28 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
29 |
30 |
31 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample,
32 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor,
33 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) {
34 |
35 | CHECK_INPUT(grad_out_tensor);
36 | CHECK_INPUT(idx_tensor);
37 | CHECK_INPUT(idx_batch_cnt_tensor);
38 | CHECK_INPUT(features_batch_cnt_tensor);
39 | CHECK_INPUT(grad_features_tensor);
40 |
41 |     const float *grad_out = grad_out_tensor.data<float>();
42 |     const int *idx = idx_tensor.data<int>();
43 |     const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
44 |     const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
45 |     float *grad_features = grad_features_tensor.data<float>();
46 |
47 | group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features);
48 | return 1;
49 | }
50 |
51 |
52 | int group_points_wrapper_stack(int B, int M, int C, int nsample,
53 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor,
54 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) {
55 |
56 | CHECK_INPUT(features_tensor);
57 | CHECK_INPUT(features_batch_cnt_tensor);
58 | CHECK_INPUT(idx_tensor);
59 | CHECK_INPUT(idx_batch_cnt_tensor);
60 | CHECK_INPUT(out_tensor);
61 |
62 |     const float *features = features_tensor.data<float>();
63 |     const int *idx = idx_tensor.data<int>();
64 |     const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
65 |     const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
66 |     float *out = out_tensor.data<float>();
67 |
68 | group_points_kernel_launcher_stack(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);
69 | return 1;
70 | }
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #ifndef _STACK_GROUP_POINTS_GPU_H
9 | #define _STACK_GROUP_POINTS_GPU_H
10 |
11 | #include <torch/serialize/tensor.h>
12 | #include <cuda.h>
13 | #include <cuda_runtime_api.h>
14 | #include <vector>
15 |
16 |
17 | int group_points_wrapper_stack(int B, int M, int C, int nsample,
18 | at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor,
19 | at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor);
20 |
21 | void group_points_kernel_launcher_stack(int B, int M, int C, int nsample,
22 | const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out);
23 |
24 | int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample,
25 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor,
26 | at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor);
27 |
28 | void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample,
29 | const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features);
30 |
31 | #endif
32 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes.
3 | Written by Shaoshuai Shi
4 | All Rights Reserved 2019-2020.
5 | */
6 |
7 |
8 | #include <torch/serialize/tensor.h>
9 | #include <vector>
10 | #include <math.h>
11 | #include <stdio.h>
12 | #include <stdlib.h>
13 | #include <cuda.h>
14 | #include <cuda_runtime_api.h>
15 | #include <THC/THC.h>
16 | #include "interpolate_gpu.h"
17 |
18 | extern THCState *state;
19 |
20 | #define CHECK_CUDA(x) do { \
21 | if (!x.type().is_cuda()) { \
22 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
23 | exit(-1); \
24 | } \
25 | } while (0)
26 | #define CHECK_CONTIGUOUS(x) do { \
27 | if (!x.is_contiguous()) { \
28 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
29 | exit(-1); \
30 | } \
31 | } while (0)
32 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
33 |
34 |
35 | void three_nn_wrapper_stack(at::Tensor unknown_tensor,
36 | at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor,
37 | at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor){
38 | // unknown: (N1 + N2 ..., 3)
39 | // unknown_batch_cnt: (batch_size), [N1, N2, ...]
40 | // known: (M1 + M2 ..., 3)
41 | // known_batch_cnt: (batch_size), [M1, M2, ...]
42 | // Return:
43 | // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
44 | // idx: (N1 + N2 ..., 3) index of the three nearest neighbors
45 | CHECK_INPUT(unknown_tensor);
46 | CHECK_INPUT(unknown_batch_cnt_tensor);
47 | CHECK_INPUT(known_tensor);
48 | CHECK_INPUT(known_batch_cnt_tensor);
49 | CHECK_INPUT(dist2_tensor);
50 | CHECK_INPUT(idx_tensor);
51 |
52 | int batch_size = unknown_batch_cnt_tensor.size(0);
53 | int N = unknown_tensor.size(0);
54 | int M = known_tensor.size(0);
55 |     const float *unknown = unknown_tensor.data<float>();
56 |     const int *unknown_batch_cnt = unknown_batch_cnt_tensor.data<int>();
57 |     const float *known = known_tensor.data<float>();
58 |     const int *known_batch_cnt = known_batch_cnt_tensor.data<int>();
59 |     float *dist2 = dist2_tensor.data<float>();
60 |     int *idx = idx_tensor.data<int>();
61 |
62 | three_nn_kernel_launcher_stack(batch_size, N, M, unknown, unknown_batch_cnt, known, known_batch_cnt, dist2, idx);
63 | }
64 |
65 |
66 | void three_interpolate_wrapper_stack(at::Tensor features_tensor,
67 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor) {
68 | // features_tensor: (M1 + M2 ..., C)
69 | // idx_tensor: [N1 + N2 ..., 3]
70 | // weight_tensor: [N1 + N2 ..., 3]
71 | // Return:
72 | // out_tensor: (N1 + N2 ..., C)
73 | CHECK_INPUT(features_tensor);
74 | CHECK_INPUT(idx_tensor);
75 | CHECK_INPUT(weight_tensor);
76 | CHECK_INPUT(out_tensor);
77 |
78 | int N = out_tensor.size(0);
79 | int channels = features_tensor.size(1);
80 |     const float *features = features_tensor.data<float>();
81 |     const float *weight = weight_tensor.data<float>();
82 |     const int *idx = idx_tensor.data<int>();
83 |     float *out = out_tensor.data<float>();
84 |
85 | three_interpolate_kernel_launcher_stack(N, channels, features, idx, weight, out);
86 | }
87 |
88 |
89 | void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor,
90 | at::Tensor weight_tensor, at::Tensor grad_features_tensor) {
91 | // grad_out_tensor: (N1 + N2 ..., C)
92 | // idx_tensor: [N1 + N2 ..., 3]
93 | // weight_tensor: [N1 + N2 ..., 3]
94 | // Return:
95 | // grad_features_tensor: (M1 + M2 ..., C)
96 | CHECK_INPUT(grad_out_tensor);
97 | CHECK_INPUT(idx_tensor);
98 | CHECK_INPUT(weight_tensor);
99 | CHECK_INPUT(grad_features_tensor);
100 |
101 | int N = grad_out_tensor.size(0);
102 | int channels = grad_out_tensor.size(1);
103 |     const float *grad_out = grad_out_tensor.data<float>();
104 |     const float *weight = weight_tensor.data<float>();
105 |     const int *idx = idx_tensor.data<int>();
106 |     float *grad_features = grad_features_tensor.data<float>();
107 |
108 | // printf("N=%d, channels=%d\n", N, channels);
109 | three_interpolate_grad_kernel_launcher_stack(N, channels, grad_out, idx, weight, grad_features);
110 | }
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _INTERPOLATE_GPU_H
2 | #define _INTERPOLATE_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 |
10 | void three_nn_wrapper_stack(at::Tensor unknown_tensor,
11 | at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor,
12 | at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);
13 |
14 |
15 | void three_interpolate_wrapper_stack(at::Tensor features_tensor,
16 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);
17 |
18 |
19 |
20 | void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor,
21 | at::Tensor weight_tensor, at::Tensor grad_features_tensor);
22 |
23 |
24 | void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown,
25 | const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt,
26 | float *dist2, int *idx);
27 |
28 |
29 | void three_interpolate_kernel_launcher_stack(int N, int channels,
30 | const float *features, const int *idx, const float *weight, float *out);
31 |
32 |
33 |
34 | void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out,
35 | const int *idx, const float *weight, float *grad_features);
36 |
37 |
38 |
39 | #endif
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 |
4 | #include "ball_query_gpu.h"
5 | #include "group_points_gpu.h"
6 | #include "sampling_gpu.h"
7 | #include "interpolate_gpu.h"
8 | #include "voxel_query_gpu.h"
9 |
10 |
11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
12 | m.def("ball_query_wrapper", &ball_query_wrapper_stack, "ball_query_wrapper_stack");
13 | m.def("voxel_query_wrapper", &voxel_query_wrapper_stack, "voxel_query_wrapper_stack");
14 |
15 | m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");
16 |
17 | m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack");
18 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack");
19 |
20 | m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack");
21 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack");
22 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack");
23 | }
24 |
--------------------------------------------------------------------------------
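Note: these `m.def(...)` registrations are what the Python side binds against: `setup.py` compiles this translation unit into the `pointnet2_stack_cuda` extension, and modules like `voxel_query_utils.py` import it and call the registered names directly. A sketch of a raw call, assuming the extension has been built (shapes illustrative; all tensor arguments must be contiguous CUDA tensors, and outputs such as `idx` are pre-allocated and filled in place):

```python
import torch
from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_stack_cuda as pointnet2

xyz = torch.randn(10000, 3).cuda().contiguous()            # stacked points of 2 samples
xyz_batch_cnt = torch.tensor([6000, 4000]).int().cuda()
new_xyz = torch.randn(2048, 3).cuda().contiguous()         # stacked query centers
new_xyz_batch_cnt = torch.tensor([1024, 1024]).int().cuda()
idx = torch.cuda.IntTensor(2048, 16).zero_()               # output buffer

pointnet2.ball_query_wrapper(2, 2048, 0.8, 16, new_xyz, new_xyz_batch_cnt,
                             xyz, xyz_batch_cnt, idx)
```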
/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <ATen/cuda/CUDAContext.h>
3 | #include <vector>
4 | #include <THC/THC.h>
5 |
6 | #include "sampling_gpu.h"
7 |
8 | extern THCState *state;
9 | #define CHECK_CUDA(x) do { \
10 | if (!x.type().is_cuda()) { \
11 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
12 | exit(-1); \
13 | } \
14 | } while (0)
15 | #define CHECK_CONTIGUOUS(x) do { \
16 | if (!x.is_contiguous()) { \
17 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
18 | exit(-1); \
19 | } \
20 | } while (0)
21 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
22 |
23 |
24 | int furthest_point_sampling_wrapper(int b, int n, int m,
25 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {
26 |
27 | CHECK_INPUT(points_tensor);
28 | CHECK_INPUT(temp_tensor);
29 | CHECK_INPUT(idx_tensor);
30 |
31 |     const float *points = points_tensor.data<float>();
32 |     float *temp = temp_tensor.data<float>();
33 |     int *idx = idx_tensor.data<int>();
34 |
35 | furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
36 | return 1;
37 | }
38 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _SAMPLING_GPU_H
2 | #define _SAMPLING_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <vector>
7 |
8 |
9 | int furthest_point_sampling_wrapper(int b, int n, int m,
10 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);
11 |
12 | void furthest_point_sampling_kernel_launcher(int b, int n, int m,
13 | const float *dataset, float *temp, int *idxs);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <THC/THC.h>
7 | #include <cuda.h>
8 | #include <cuda_runtime_api.h>
9 | #include "voxel_query_gpu.h"
10 |
11 | extern THCState *state;
12 |
13 | #define CHECK_CUDA(x) do { \
14 | if (!x.type().is_cuda()) { \
15 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
16 | exit(-1); \
17 | } \
18 | } while (0)
19 | #define CHECK_CONTIGUOUS(x) do { \
20 | if (!x.is_contiguous()) { \
21 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
22 | exit(-1); \
23 | } \
24 | } while (0)
25 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
26 |
27 |
28 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
29 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
30 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor) {
31 |     // sanity-check the input tensors
32 |     CHECK_INPUT(new_coords_tensor);
33 |     CHECK_INPUT(point_indices_tensor);
34 |     CHECK_INPUT(new_xyz_tensor);
35 |     CHECK_INPUT(xyz_tensor);
36 | 
37 |     // bind the tensor data to raw pointers
38 |     const float *new_xyz = new_xyz_tensor.data<float>();
39 |     const float *xyz = xyz_tensor.data<float>();
40 |     const int *new_coords = new_coords_tensor.data<int>();
41 |     const int *point_indices = point_indices_tensor.data<int>();
42 |     int *idx = idx_tensor.data<int>();
43 |     // invoke the kernel launcher
44 |     voxel_query_kernel_launcher_stack(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
45 | return 1;
46 | }
47 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <curand_kernel.h>
5 |
6 | #include "voxel_query_gpu.h"
7 | #include "cuda_utils.h"
8 |
9 |
10 | __global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
11 | float radius, int z_range, int y_range, int x_range, const float *new_xyz,
12 | const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
13 | // :param new_coords: (M1 + M2 ..., 4) centers of the ball query
14 | // :param point_indices: (B, Z, Y, X)
15 | // output:
16 | // idx: (M1 + M2, nsample)
17 |     int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // thread id; each thread handles one grid point
18 |     if (pt_idx >= M) return;
19 | 
20 |     new_xyz += pt_idx * 3;     // point-cloud coordinates are strided by 3 (x, y, z)
21 |     new_coords += pt_idx * 4;  // voxel coordinates are strided by 4 (batch_index, z, y, x)
22 |     idx += pt_idx * nsample;   // output indices are strided by nsample (e.g. 16)
23 | 
24 |     curandState state;                  // per-thread random-number state
25 |     curand_init(pt_idx, 0, 0, &state);  // initialize the state
26 | 
27 |     float radius2 = radius * radius;  // squared radius threshold
28 |     // point-cloud coordinates of this grid point
29 |     float new_x = new_xyz[0];
30 |     float new_y = new_xyz[1];
31 |     float new_z = new_xyz[2];
32 |     // voxel coordinates of this grid point
33 |     int batch_idx = new_coords[0];
34 |     int new_coords_z = new_coords[1];
35 |     int new_coords_y = new_coords[2];
36 |     int new_coords_x = new_coords[3];
37 | 
38 |     int cnt = 0;
39 |     // search the voxels within +/- range of this voxel along each axis
40 |     // M, R1, R2, R3 --> e.g.: 8, 21, 800, 704
41 | for (int dz = -1*z_range; dz <= z_range; ++dz) {
42 | int z_coord = new_coords_z + dz;
43 | if (z_coord < 0 || z_coord >= R1) continue;
44 |
45 | for (int dy = -1*y_range; dy <= y_range; ++dy) {
46 | int y_coord = new_coords_y + dy;
47 | if (y_coord < 0 || y_coord >= R2) continue;
48 |
49 | for (int dx = -1*x_range; dx <= x_range; ++dx) {
50 | int x_coord = new_coords_x + dx;
51 | if (x_coord < 0 || x_coord >= R3) continue;
52 |
53 | int index = batch_idx * R1 * R2 * R3 + \
54 | z_coord * R2 * R3 + \
55 | y_coord * R3 + \
56 |                     x_coord;  // flat index of this voxel in point_indices
57 |                 int neighbor_idx = point_indices[index];  // index of the point stored in this voxel
58 |                 if (neighbor_idx < 0) continue;  // skip empty voxels
59 | 
60 |                 // point-cloud coordinates of this neighbor
61 |                 float x_per = xyz[neighbor_idx*3 + 0];
62 |                 float y_per = xyz[neighbor_idx*3 + 1];
63 |                 float z_per = xyz[neighbor_idx*3 + 2];
64 | 
65 |                 // squared distance between the two points
66 |                 float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
67 | 
68 |                 // skip neighbors beyond the radius threshold
69 |                 if (dist2 > radius2) continue;
70 | 
71 |                 if (cnt < nsample) {
72 |                     if (cnt == 0) {
73 |                         for (int l = 0; l < nsample; ++l) {
74 |                             idx[l] = neighbor_idx;  // pre-fill every slot with the first neighbor
75 |                         }
76 |                     }
77 |                     idx[cnt] = neighbor_idx;  // write neighbors starting at idx
78 |                     ++cnt;  // one more neighbor found
79 |                 }
80 |             }
81 |         }
82 |     }
83 |     if (cnt == 0) idx[0] = -1;  // no neighbor found: mark the first slot with -1
84 | }
85 |
86 |
87 | void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
88 | float radius, int z_range, int y_range, int x_range, const float *new_xyz,
89 | const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
90 | // :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
91 | // :param point_indices: (B, Z, Y, X)
92 | // output:
93 | // idx: (M1 + M2, nsample)
94 |
95 | cudaError_t err;
96 |     // configure the launch
97 |     dim3 blocks(DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row); 1-D grid of blocks
98 |     dim3 threads(THREADS_PER_BLOCK);           // 1-D blocks of threads
99 | 
100 |     // launch the kernel
101 |     voxel_query_kernel_stack<<<blocks, threads>>>(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
102 |     // cudaDeviceSynchronize();  // for using printf in kernel function
103 | 
104 |     // standard error check
105 | err = cudaGetLastError();
106 | if (cudaSuccess != err) {
107 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
108 | exit(-1);
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h:
--------------------------------------------------------------------------------
1 | #ifndef _STACK_VOXEL_QUERY_GPU_H
2 | #define _STACK_VOXEL_QUERY_GPU_H
3 |
4 | #include <torch/serialize/tensor.h>
5 | #include <vector>
6 | #include <cuda.h>
7 | #include <cuda_runtime_api.h>
8 |
9 | int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
10 | int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
11 | at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor);
12 |
13 |
14 | void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
15 | float radius, int z_range, int y_range, int x_range, const float *new_xyz,
16 | const float *xyz, const int *new_coords, const int *point_indices, int *idx);
17 |
18 |
19 | #endif
20 |
--------------------------------------------------------------------------------
/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable
3 | from torch.autograd import Function
4 | import torch.nn as nn
5 | from typing import List
6 |
7 | from . import pointnet2_stack_cuda as pointnet2
8 | from . import pointnet2_utils
9 |
10 | class VoxelQuery(Function):
11 |
12 | @staticmethod
13 | def forward(ctx, max_range: int, radius: float, nsample: int, xyz: torch.Tensor, \
14 | new_xyz: torch.Tensor, new_coords: torch.Tensor, point_indices: torch.Tensor):
15 |         """
16 |         Args:
17 |             ctx:
18 |             max_range: int, max range of voxels to be grouped (in voxel units)
19 |             radius: float, search radius in point-cloud coordinates
20 |             nsample: int, maximum number of features in the balls
21 |             xyz: voxel point-cloud coordinates, e.g. (204916, 3)
22 |             new_xyz: roi grid point-cloud coordinates, e.g. (221184, 3) --> the two point sets are compared against radius to decide whether a point qualifies
23 |             new_coords: (M1 + M2, 4), [batch_id, z, y, x] coordinates of keypoints used for the query
24 |             point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels
25 |         Returns:
26 |             idx: (M1 + M2, nsample) tensor with the indices of the features that form the query balls
27 |         """
28 | assert new_xyz.is_contiguous()
29 | assert xyz.is_contiguous()
30 | assert new_coords.is_contiguous()
31 | assert point_indices.is_contiguous()
32 |
33 |         M = new_coords.shape[0]  # e.g. 221184
34 |         B, Z, Y, X = point_indices.shape  # e.g. 8, 21, 800, 704
35 |         idx = torch.cuda.IntTensor(M, nsample).zero_()  # (221184, 16)
36 | 
37 |         z_range, y_range, x_range = max_range  # e.g. 4, 4, 4
38 |         pointnet2.voxel_query_wrapper(M, Z, Y, X, nsample, radius, z_range, y_range, x_range, \
39 |             new_xyz, xyz, new_coords, point_indices, idx)
40 | 
41 |         empty_ball_mask = (idx[:, 0] == -1)  # grid points whose first neighbor index is -1 found no neighbor
42 |         idx[empty_ball_mask] = 0  # reset the neighbor indices of those grid points to 0
43 |
44 | return idx, empty_ball_mask
45 |
46 | @staticmethod
47 | def backward(ctx, a=None):
48 | return None, None, None, None
49 |
50 | voxel_query = VoxelQuery.apply
51 |
52 |
53 | class VoxelQueryAndGrouping(nn.Module):
54 | def __init__(self, max_range: int, radius: float, nsample: int):
55 | """
56 | Args:
57 | radius: float, radius of ball
58 | nsample: int, maximum number of features to gather in the ball
59 | """
60 | super().__init__()
61 |         self.max_range, self.radius, self.nsample = max_range, radius, nsample  # e.g. [4, 4, 4], 0.4, 16
62 |
63 | def forward(self, new_coords: torch.Tensor, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
64 | new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
65 | features: torch.Tensor, voxel2point_indices: torch.Tensor):
66 | """
67 | Args:
68 | new_coords: (M1 + M2 ..., 3) centers voxel indices of the ball query
69 | xyz: (N1 + N2 ..., 3) xyz coordinates of the features
70 |             xyz_batch_cnt: (batch_size), [N1, N2, ...] number of valid points per frame
71 |             new_xyz: (M1 + M2 ..., 3) centers of the ball query
72 |             new_xyz_batch_cnt: (batch_size), [M1, M2, ...] number of valid roi grid points per frame
73 | features: (N1 + N2 ..., C) tensor of features to group
74 | voxel2point_indices: (B, Z, Y, X) tensor of points indices of voxels
75 |
76 | Returns:
77 | new_features: (M1 + M2, C, nsample) tensor
78 | """
79 |         assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
80 |         assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
81 |             'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
82 |         batch_size = xyz_batch_cnt.shape[0]  # e.g. 8
83 | 
84 |         # idx: (M1 + M2 ..., nsample) --> e.g. (221184, 16), feature indices gathered at each roi grid point
85 |         # empty_ball_mask: (M1 + M2 ...) --> e.g. (221184,), whether a roi grid point failed to gather any feature
86 |         # max_range: [4, 4, 4], radius: 0.4, nsample: 16
87 |         idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius, self.nsample, xyz, new_xyz, new_coords, voxel2point_indices)
88 | 
89 |         idx1 = idx1.view(batch_size, -1, self.nsample)  # (8, 27648, 16)
90 |         count = 0
91 |         # subtract each frame's start offset to turn global indices into per-frame indices
92 |         for bs_idx in range(batch_size):
93 |             idx1[bs_idx] -= count
94 |             count += xyz_batch_cnt[bs_idx]
95 |         idx1 = idx1.view(-1, self.nsample)  # (221184, 16)
96 |         idx1[empty_ball_mask1] = 0  # zero out grid points with no neighbors
97 | 
98 |         idx = idx1  # (221184, 16)
99 |         empty_ball_mask = empty_ball_mask1  # (221184,)
100 |         """
101 |         xyz: feature point coordinates, e.g. (204916, 3)
102 |         xyz_batch_cnt: number of feature points per frame, e.g. (8,)
103 |         idx: neighbor indices of the grid points, e.g. (221184, 16)
104 |         new_xyz_batch_cnt: number of roi grid points per frame, e.g. (8,)
105 |         """
106 | grouped_xyz = pointnet2_utils.grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (221184, 3, 16)
107 | # grouped_features: (M1 + M2, C, nsample)
108 | grouped_features = pointnet2_utils.grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (221184, 32, 16)
109 |
110 | return grouped_features, grouped_xyz, empty_ball_mask
111 |
--------------------------------------------------------------------------------
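Note: the offset loop in `VoxelQueryAndGrouping.forward` converts the kernel's global (stacked) point indices into the per-frame indices that `grouping_operation` expects. The same bookkeeping on toy numbers:

```python
import torch

xyz_batch_cnt = torch.tensor([3, 2])   # frame 0 has 3 points, frame 1 has 2
idx = torch.tensor([[0, 2], [3, 4]])   # global indices into the stacked xyz
batch_size, nsample = 2, 2

idx = idx.view(batch_size, -1, nsample).clone()
count = 0
for bs_idx in range(batch_size):
    idx[bs_idx] -= count               # subtract this frame's start offset
    count += xyz_batch_cnt[bs_idx]
print(idx.view(-1, nsample))           # tensor([[0, 2], [0, 1]])
```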
/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.autograd import Function
4 |
5 | from ...utils import common_utils
6 | from . import roiaware_pool3d_cuda
7 |
8 |
9 | def points_in_boxes_cpu(points, boxes):
10 | """
11 | Args:
12 | points: (num_points, 3)
13 |         boxes: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center; boxes must not overlap with each other
14 | Returns:
15 | point_indices: (N, num_points)
16 | """
17 | assert boxes.shape[1] == 7
18 | assert points.shape[1] == 3
19 | points, is_numpy = common_utils.check_numpy_to_torch(points)
20 | boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)
21 |
22 | point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
23 | roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), point_indices)
24 |
25 | return point_indices.numpy() if is_numpy else point_indices
26 |
27 |
28 | def points_in_boxes_gpu(points, boxes):
29 | """
30 | :param points: (B, M, 3)
31 | :param boxes: (B, T, 7), num_valid_boxes <= T
32 | :return box_idxs_of_pts: (B, M), default background = -1
33 | """
34 | assert boxes.shape[0] == points.shape[0]
35 | assert boxes.shape[2] == 7 and points.shape[2] == 3
36 | batch_size, num_points, _ = points.shape
37 |
38 | box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)
39 | roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
40 |
41 | return box_idxs_of_pts
42 |
43 |
44 | class RoIAwarePool3d(nn.Module):
45 | def __init__(self, out_size, max_pts_each_voxel=128):
46 | super().__init__()
47 | self.out_size = out_size
48 | self.max_pts_each_voxel = max_pts_each_voxel
49 |
50 | def forward(self, rois, pts, pts_feature, pool_method='max'):
51 | assert pool_method in ['max', 'avg']
52 | return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method)
53 |
54 |
55 | class RoIAwarePool3dFunction(Function):
56 | @staticmethod
57 | def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
58 | """
59 | Args:
60 | ctx:
61 | rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
62 | pts: (npoints, 3)
63 | pts_feature: (npoints, C)
64 | out_size: int or tuple, like 7 or (7, 7, 7)
65 | max_pts_each_voxel:
66 | pool_method: 'max' or 'avg'
67 |
68 | Returns:
69 | pooled_features: (N, out_x, out_y, out_z, C)
70 | """
71 | assert rois.shape[1] == 7 and pts.shape[1] == 3
72 | if isinstance(out_size, int):
73 | out_x = out_y = out_z = out_size
74 | else:
75 | assert len(out_size) == 3
76 | for k in range(3):
77 | assert isinstance(out_size[k], int)
78 | out_x, out_y, out_z = out_size
79 |
80 | num_rois = rois.shape[0]
81 | num_channels = pts_feature.shape[-1]
82 | num_pts = pts.shape[0]
83 |
84 | pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
85 | argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
86 | pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
87 |
88 | pool_method_map = {'max': 0, 'avg': 1}
89 | pool_method = pool_method_map[pool_method]
90 | roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
91 |
92 | ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
93 | return pooled_features
94 |
95 | @staticmethod
96 | def backward(ctx, grad_out):
97 | """
98 | :param grad_out: (N, out_x, out_y, out_z, C)
99 | :return:
100 | grad_in: (npoints, C)
101 | """
102 | pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward
103 |
104 | grad_in = grad_out.new_zeros((num_pts, num_channels))
105 | roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
106 |
107 | return None, None, grad_in, None, None, None
108 |
109 |
110 | if __name__ == '__main__':
111 | pass
112 |
--------------------------------------------------------------------------------
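Note: a quick usage sketch for the point-in-box test above, using the documented box layout [x, y, z, dx, dy, dz, heading] (random data, shapes illustrative):

```python
import torch
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils

points = torch.rand(1, 1000, 3).cuda() * 20                          # (B, M, 3)
boxes = torch.tensor([[[10., 10., 10., 4., 2., 1.5, 0.3]]]).cuda()   # (B, T, 7)

# (B, M): index of the box containing each point, -1 for background
box_idxs = roiaware_pool3d_utils.points_in_boxes_gpu(points, boxes)
inside_mask = box_idxs[0] >= 0
```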
/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.autograd import Function
4 |
5 | from ...utils import box_utils
6 | from . import roipoint_pool3d_cuda
7 |
8 |
9 | class RoIPointPool3d(nn.Module):
10 | def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
11 | super().__init__()
12 | self.num_sampled_points = num_sampled_points
13 | self.pool_extra_width = pool_extra_width
14 |
15 | def forward(self, points, point_features, boxes3d):
16 | """
17 | Args:
18 | points: (B, N, 3)
19 | point_features: (B, N, C)
20 | boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]
21 |
22 | Returns:
23 | pooled_features: (B, M, 512, 3 + C)
24 | pooled_empty_flag: (B, M)
25 | """
26 | return RoIPointPool3dFunction.apply(
27 | points, point_features, boxes3d, self.pool_extra_width, self.num_sampled_points
28 | )
29 |
30 |
31 | class RoIPointPool3dFunction(Function):
32 | @staticmethod
33 | def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
34 | """
35 | Args:
36 | ctx:
37 | points: (B, N, 3)
38 | point_features: (B, N, C)
39 | boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
40 | pool_extra_width:
41 | num_sampled_points:
42 |
43 | Returns:
44 | pooled_features: (B, num_boxes, 512, 3 + C)
45 | pooled_empty_flag: (B, num_boxes)
46 | """
47 | assert points.shape.__len__() == 3 and points.shape[2] == 3
48 | batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2]
49 | pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7)
50 |
51 | pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len))
52 | pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
53 |
54 | roipoint_pool3d_cuda.forward(
55 | points.contiguous(), pooled_boxes3d.contiguous(),
56 | point_features.contiguous(), pooled_features, pooled_empty_flag
57 | )
58 |
59 | return pooled_features, pooled_empty_flag
60 |
61 | @staticmethod
62 | def backward(ctx, grad_out):
63 | raise NotImplementedError
64 |
65 |
66 | if __name__ == '__main__':
67 | pass
68 |
--------------------------------------------------------------------------------
/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/serialize/tensor.h>
2 | #include <torch/extension.h>
3 |
4 | #define CHECK_CUDA(x) do { \
5 | if (!x.type().is_cuda()) { \
6 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
7 | exit(-1); \
8 | } \
9 | } while (0)
10 | #define CHECK_CONTIGUOUS(x) do { \
11 | if (!x.is_contiguous()) { \
12 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
13 | exit(-1); \
14 | } \
15 | } while (0)
16 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
17 |
18 |
19 | void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num,
20 | const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag);
21 |
22 |
23 | int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag){
24 | // params xyz: (B, N, 3)
25 | // params boxes3d: (B, M, 7)
26 | // params pts_feature: (B, N, C)
27 | // params pooled_features: (B, M, 512, 3+C)
28 | // params pooled_empty_flag: (B, M)
29 | CHECK_INPUT(xyz);
30 | CHECK_INPUT(boxes3d);
31 | CHECK_INPUT(pts_feature);
32 | CHECK_INPUT(pooled_features);
33 | CHECK_INPUT(pooled_empty_flag);
34 |
35 | int batch_size = xyz.size(0);
36 | int pts_num = xyz.size(1);
37 | int boxes_num = boxes3d.size(1);
38 | int feature_in_len = pts_feature.size(2);
39 | int sampled_pts_num = pooled_features.size(2);
40 |
41 |
42 |     const float * xyz_data = xyz.data<float>();
43 |     const float * boxes3d_data = boxes3d.data<float>();
44 |     const float * pts_feature_data = pts_feature.data<float>();
45 |     float * pooled_features_data = pooled_features.data<float>();
46 |     int * pooled_empty_flag_data = pooled_empty_flag.data<int>();
47 |
48 | roipool3dLauncher(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num,
49 | xyz_data, boxes3d_data, pts_feature_data, pooled_features_data, pooled_empty_flag_data);
50 |
51 |
52 |
53 | return 1;
54 | }
55 |
56 |
57 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
58 | m.def("forward", &roipool3d_gpu, "roipool3d forward (CUDA)");
59 | }
60 |
61 |
--------------------------------------------------------------------------------
/pcdet/utils/calibration_kitti.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def get_calib_from_file(calib_file):
5 | with open(calib_file) as f:
6 | lines = f.readlines()
7 |
8 | obj = lines[2].strip().split(' ')[1:]
9 | P2 = np.array(obj, dtype=np.float32)
10 | obj = lines[3].strip().split(' ')[1:]
11 | P3 = np.array(obj, dtype=np.float32)
12 | obj = lines[4].strip().split(' ')[1:]
13 | R0 = np.array(obj, dtype=np.float32)
14 | obj = lines[5].strip().split(' ')[1:]
15 | Tr_velo_to_cam = np.array(obj, dtype=np.float32)
16 |
17 | return {'P2': P2.reshape(3, 4),
18 | 'P3': P3.reshape(3, 4),
19 | 'R0': R0.reshape(3, 3),
20 | 'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
21 |
22 |
23 | class Calibration(object):
24 | def __init__(self, calib_file):
25 | if not isinstance(calib_file, dict):
26 | calib = get_calib_from_file(calib_file)
27 | else:
28 | calib = calib_file
29 |
30 | self.P2 = calib['P2'] # 3 x 4
31 | self.R0 = calib['R0'] # 3 x 3
32 | self.V2C = calib['Tr_velo2cam'] # 3 x 4
33 |
34 | # Camera intrinsics and extrinsics
35 | self.cu = self.P2[0, 2]
36 | self.cv = self.P2[1, 2]
37 | self.fu = self.P2[0, 0]
38 | self.fv = self.P2[1, 1]
39 | self.tx = self.P2[0, 3] / (-self.fu)
40 | self.ty = self.P2[1, 3] / (-self.fv)
41 |
42 | def cart_to_hom(self, pts):
43 | """
44 | :param pts: (N, 3 or 2)
45 | :return pts_hom: (N, 4 or 3)
46 | """
47 | pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
48 | return pts_hom
49 |
50 | def rect_to_lidar(self, pts_rect):
51 | """
52 | :param pts_lidar: (N, 3)
53 | :return pts_rect: (N, 3)
54 | """
55 | pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
56 | R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
57 | R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
58 | R0_ext[3, 3] = 1
59 | V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
60 | V2C_ext[3, 3] = 1
61 |
62 | pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
63 | return pts_lidar[:, 0:3]
64 |
65 | def lidar_to_rect(self, pts_lidar):
66 | """
67 | :param pts_lidar: (N, 3)
68 | :return pts_rect: (N, 3)
69 | """
70 | pts_lidar_hom = self.cart_to_hom(pts_lidar)
71 | pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
72 | # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
73 | return pts_rect
74 |
75 | def rect_to_img(self, pts_rect):
76 | """
77 | :param pts_rect: (N, 3)
78 | :return pts_img: (N, 2)
79 | """
80 | pts_rect_hom = self.cart_to_hom(pts_rect)
81 | pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
82 | pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
83 | pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
84 | return pts_img, pts_rect_depth
85 |
86 | def lidar_to_img(self, pts_lidar):
87 | """
88 | :param pts_lidar: (N, 3)
89 | :return pts_img: (N, 2)
90 | """
91 | pts_rect = self.lidar_to_rect(pts_lidar)
92 | pts_img, pts_depth = self.rect_to_img(pts_rect)
93 | return pts_img, pts_depth
94 |
95 | def img_to_rect(self, u, v, depth_rect):
96 | """
97 | :param u: (N)
98 | :param v: (N)
99 | :param depth_rect: (N)
100 | :return:
101 | """
102 | x = ((u - self.cu) * depth_rect) / self.fu + self.tx
103 | y = ((v - self.cv) * depth_rect) / self.fv + self.ty
104 | pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
105 | return pts_rect
106 |
107 | def corners3d_to_img_boxes(self, corners3d):
108 | """
109 | :param corners3d: (N, 8, 3) corners in rect coordinate
110 |         :return: boxes: (N, 4) [x1, y1, x2, y2] in rgb coordinate
111 |         :return: boxes_corner: (N, 8, 2) [xi, yi] in rgb coordinate
112 | """
113 | sample_num = corners3d.shape[0]
114 | corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
115 |
116 | img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
117 |
118 | x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
119 | x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
120 | x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
121 |
122 | boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
123 | boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
124 |
125 | return boxes, boxes_corner
126 |
--------------------------------------------------------------------------------
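Note: the coordinate chain implemented above is lidar -> rect camera (via `V2C`, then `R0`) -> image (via `P2`), and `img_to_rect` inverts the projection given per-point depth. A round-trip sketch, assuming a standard KITTI calibration file is available (the path is illustrative):

```python
import numpy as np
from pcdet.utils.calibration_kitti import Calibration

calib = Calibration('data/kitti/training/calib/000000.txt')   # illustrative path

pts_lidar = np.array([[10.0, 1.0, -0.5]], dtype=np.float32)   # (N, 3), velodyne frame
pts_rect = calib.lidar_to_rect(pts_lidar)                     # (N, 3), rect camera frame
pts_img, depth = calib.rect_to_img(pts_rect)                  # (N, 2) pixels, (N,) depth

back = calib.img_to_rect(pts_img[:, 0], pts_img[:, 1], depth) # invert using the depth
assert np.allclose(back, pts_rect, atol=1e-3)
```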
/pcdet/utils/object3d_kitti.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def get_objects_from_label(label_file):
5 | with open(label_file, 'r') as f:
6 | lines = f.readlines()
7 | objects = [Object3d(line) for line in lines]
8 | return objects
9 |
10 |
11 | def cls_type_to_id(cls_type):
12 | type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
13 | if cls_type not in type_to_id.keys():
14 | return -1
15 | return type_to_id[cls_type]
16 |
17 |
18 | class Object3d(object):
19 | def __init__(self, line):
20 | label = line.strip().split(' ')
21 | self.src = line
22 | self.cls_type = label[0]
23 | self.cls_id = cls_type_to_id(self.cls_type)
24 | self.truncation = float(label[1])
25 | self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
26 | self.alpha = float(label[3])
27 | self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
28 | self.h = float(label[8])
29 | self.w = float(label[9])
30 | self.l = float(label[10])
31 | self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
32 | self.dis_to_cam = np.linalg.norm(self.loc)
33 | self.ry = float(label[14])
34 | self.score = float(label[15]) if label.__len__() == 16 else -1.0
35 | self.level_str = None
36 | self.level = self.get_kitti_obj_level()
37 |
38 | def get_kitti_obj_level(self):
39 | height = float(self.box2d[3]) - float(self.box2d[1]) + 1
40 |
41 | if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
42 | self.level_str = 'Easy'
43 | return 0 # Easy
44 | elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
45 | self.level_str = 'Moderate'
46 | return 1 # Moderate
47 | elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
48 | self.level_str = 'Hard'
49 | return 2 # Hard
50 | else:
51 | self.level_str = 'UnKnown'
52 | return -1
53 |
54 | def generate_corners3d(self):
55 | """
56 | generate corners3d representation for this object
57 | :return corners_3d: (8, 3) corners of box3d in camera coord
58 | """
59 | l, h, w = self.l, self.h, self.w
60 | x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
61 | y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
62 | z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
63 |
64 | R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
65 | [0, 1, 0],
66 | [-np.sin(self.ry), 0, np.cos(self.ry)]])
67 | corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
68 | corners3d = np.dot(R, corners3d).T
69 | corners3d = corners3d + self.loc
70 | return corners3d
71 |
72 | def to_str(self):
73 | print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
74 | % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
75 | self.loc, self.ry)
76 | return print_str
77 |
78 | def to_kitti_format(self):
79 | kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
80 | % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
81 | self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
82 | self.ry)
83 | return kitti_str
84 |
--------------------------------------------------------------------------------
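Note: a sample KITTI label line parsed with `Object3d` (fields follow the KITTI devkit order: type, truncation, occlusion, alpha, 2D box, h/w/l, location, rotation_y; the values below are made up but well-formed):

```python
from pcdet.utils.object3d_kitti import Object3d

line = 'Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59'
obj = Object3d(line)

print(obj.cls_type, obj.cls_id)      # Car 1
print(obj.level_str)                 # 'Moderate': 2D height 27.79 px, no truncation/occlusion
corners = obj.generate_corners3d()   # (8, 3) box corners in the camera frame
```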
/pcdet/utils/transform_utils.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 |
4 | try:
5 | from kornia.geometry.conversions import (
6 | convert_points_to_homogeneous,
7 | convert_points_from_homogeneous,
8 | )
9 | except:
10 | pass
11 | # print('Warning: kornia is not installed. This package is only required by CaDDN')
12 |
13 |
14 | def project_to_image(project, points):
15 | """
16 | Project points to image
17 | Args:
18 | project [torch.tensor(..., 3, 4)]: Projection matrix
19 | points [torch.Tensor(..., 3)]: 3D points
20 | Returns:
21 | points_img [torch.Tensor(..., 2)]: Points in image
22 | points_depth [torch.Tensor(...)]: Depth of each point
23 | """
24 | # Reshape tensors to expected shape
25 | points = convert_points_to_homogeneous(points)
26 | points = points.unsqueeze(dim=-1)
27 | project = project.unsqueeze(dim=1)
28 |
29 | # Transform points to image and get depths
30 | points_t = project @ points
31 | points_t = points_t.squeeze(dim=-1)
32 | points_img = convert_points_from_homogeneous(points_t)
33 | points_depth = points_t[..., -1] - project[..., 2, 3]
34 |
35 | return points_img, points_depth
36 |
37 |
38 | def normalize_coords(coords, shape):
39 | """
40 | Normalize coordinates of a grid between [-1, 1]
41 | Args:
42 | coords: (..., 3), Coordinates in grid
43 | shape: (3), Grid shape
44 | Returns:
45 | norm_coords: (.., 3), Normalized coordinates in grid
46 | """
47 | min_n = -1
48 | max_n = 1
49 | shape = torch.flip(shape, dims=[0]) # Reverse ordering of shape
50 |
51 | # Subtract 1 since pixel indexing from [0, shape - 1]
52 | norm_coords = coords / (shape - 1) * (max_n - min_n) + min_n
53 | return norm_coords
54 |
55 |
56 | def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
57 | """
58 | Converts depth map into bin indices
59 | Args:
60 | depth_map: (H, W), Depth Map
61 |         mode: string, Discretization mode (See https://arxiv.org/pdf/2005.13423.pdf for more details)
62 |             UD: Uniform discretization
63 |             LID: Linear increasing discretization
64 |             SID: Spacing increasing discretization
65 | depth_min: float, Minimum depth value
66 | depth_max: float, Maximum depth value
67 | num_bins: int, Number of depth bins
68 | target: bool, Whether the depth bins indices will be used for a target tensor in loss comparison
69 | Returns:
70 | indices: (H, W), Depth bin indices
71 | """
72 | if mode == "UD":
73 | bin_size = (depth_max - depth_min) / num_bins
74 | indices = ((depth_map - depth_min) / bin_size)
75 | elif mode == "LID":
76 | bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
77 | indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
78 | elif mode == "SID":
79 | indices = num_bins * (torch.log(1 + depth_map) - math.log(1 + depth_min)) / \
80 | (math.log(1 + depth_max) - math.log(1 + depth_min))
81 | else:
82 | raise NotImplementedError
83 |
84 | if target:
85 |         # Remove indices outside of bounds
86 | mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
87 | indices[mask] = num_bins
88 |
89 | # Convert to integer
90 | indices = indices.type(torch.int64)
91 | return indices
92 |
--------------------------------------------------------------------------------
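Note: in LID mode, the i-th bin edge sits at `depth_min + bin_size * i * (i + 1) / 2`, so bin widths grow linearly with depth; the closed form `-0.5 + 0.5 * sqrt(1 + 8 * (d - depth_min) / bin_size)` simply inverts that quadratic. A quick numeric check (parameters illustrative):

```python
import torch
from pcdet.utils.transform_utils import bin_depths

depth_min, depth_max, num_bins = 2.0, 46.2, 80
bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))

# the midpoint of bin i maps to a fractional index near i + 0.5,
# which target mode truncates back to i
i = 10
d_mid = depth_min + bin_size * (i + 1) ** 2 / 2
idx = bin_depths(torch.tensor([[d_mid]]), 'LID', depth_min, depth_max, num_bins, target=True)
assert idx.item() == i
```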
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | torch>=1.1
3 | numba
4 | tensorboardX
5 | easydict
6 | pyyaml
7 | scikit-image
8 | tqdm
9 | kornia
10 | torchvision
11 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import subprocess
4 |
5 | from setuptools import find_packages, setup
6 | from setuptools.command.install import install
7 | # TODO: This is a bit buggy since it requires torch before installing torch.
8 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension
9 |
10 |
11 | def get_git_commit_number():
12 | if not os.path.exists('.git'):
13 | return '0000000'
14 |
15 | cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
16 | git_commit_number = cmd_out.stdout.decode('utf-8')[:7]
17 | return git_commit_number
18 |
19 |
20 | def make_cuda_ext(name, module, sources):
21 | cuda_ext = CUDAExtension(
22 | name='%s.%s' % (module, name),
23 | sources=[os.path.join(*module.split('.'), src) for src in sources]
24 | )
25 | return cuda_ext
26 |
27 |
28 | def write_version_to_file(version, target_file):
29 | with open(target_file, 'w') as f:
30 | print('__version__ = "%s"' % version, file=f)
31 |
32 |
33 | class PostInstallation(install):
34 | """Post-installation for installation mode."""
35 | def run(self):
36 | install.run(self)
37 | # Note: buggy for kornia==0.5.3 and it will be fixed in the next version.
38 | # Set kornia to 0.5.2 temporarily
39 | subprocess.call([sys.executable, '-m', 'pip', 'install', 'kornia==0.5.2', '--no-dependencies'])
40 |
41 |
42 | if __name__ == '__main__':
43 | version = '0.3.0+%s' % get_git_commit_number()
44 | write_version_to_file(version, 'pcdet/version.py')
45 |
46 | setup(
47 | name='pcdet',
48 | version=version,
49 | description='OpenPCDet is a general codebase for 3D object detection from point cloud',
50 | install_requires=[
51 | 'numpy',
52 | 'torch>=1.1',
53 | 'spconv',
54 | 'numba',
55 | 'tensorboardX',
56 | 'easydict',
57 | 'pyyaml'
58 | ],
59 | author='Shaoshuai Shi',
60 | author_email='shaoshuaics@gmail.com',
61 | license='Apache License 2.0',
62 | packages=find_packages(exclude=['tools', 'data', 'output']),
63 | cmdclass={
64 | 'build_ext': BuildExtension,
65 | 'install': PostInstallation,
66 | # Post installation cannot be done. ref: https://github.com/pypa/setuptools/issues/1936.
67 | # 'develop': PostInstallation,
68 | },
69 | ext_modules=[
70 |             # name: the name the extension is imported under, e.g. `import iou3d_nms_cuda`
71 |             # module: the package path that serves as the root for the sources
72 |             # sources: the source files (.h, .cpp, .cu) associated with the extension
73 | make_cuda_ext(
74 | name='iou3d_nms_cuda',
75 | module='pcdet.ops.iou3d_nms',
76 | sources=[
77 | 'src/iou3d_cpu.cpp',
78 | 'src/iou3d_nms_api.cpp',
79 | 'src/iou3d_nms.cpp',
80 | 'src/iou3d_nms_kernel.cu',
81 | ]
82 | ),
83 | make_cuda_ext(
84 | name='roiaware_pool3d_cuda',
85 | module='pcdet.ops.roiaware_pool3d',
86 | sources=[
87 | 'src/roiaware_pool3d.cpp',
88 | 'src/roiaware_pool3d_kernel.cu',
89 | ]
90 | ),
91 | make_cuda_ext(
92 | name='roipoint_pool3d_cuda',
93 | module='pcdet.ops.roipoint_pool3d',
94 | sources=[
95 | 'src/roipoint_pool3d.cpp',
96 | 'src/roipoint_pool3d_kernel.cu',
97 | ]
98 | ),
99 | make_cuda_ext(
100 | name='pointnet2_stack_cuda',
101 | module='pcdet.ops.pointnet2.pointnet2_stack',
102 | sources=[
103 | 'src/pointnet2_api.cpp',
104 | 'src/ball_query.cpp',
105 | 'src/ball_query_gpu.cu',
106 | 'src/group_points.cpp',
107 | 'src/group_points_gpu.cu',
108 | 'src/sampling.cpp',
109 | 'src/sampling_gpu.cu',
110 | 'src/interpolate.cpp',
111 | 'src/interpolate_gpu.cu',
112 | 'src/voxel_query.cpp',
113 | 'src/voxel_query_gpu.cu',
114 | ],
115 | ),
116 | make_cuda_ext(
117 | name='pointnet2_batch_cuda',
118 | module='pcdet.ops.pointnet2.pointnet2_batch',
119 | sources=[
120 | 'src/pointnet2_api.cpp',
121 | 'src/ball_query.cpp',
122 | 'src/ball_query_gpu.cu',
123 | 'src/group_points.cpp',
124 | 'src/group_points_gpu.cu',
125 | 'src/interpolate.cpp',
126 | 'src/interpolate_gpu.cu',
127 | 'src/sampling.cpp',
128 | 'src/sampling_gpu.cu',
129 |
130 | ],
131 | ),
132 | ],
133 | )
134 |
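For reference, a small sketch (not part of setup.py) of how make_cuda_ext above resolves the relative sources against the module path; values are illustrative:

import os

module = 'pcdet.ops.iou3d_nms'
sources = ['src/iou3d_cpu.cpp', 'src/iou3d_nms_kernel.cu']
# Same join as in make_cuda_ext(): package path components + relative source file.
print([os.path.join(*module.split('.'), src) for src in sources])
# ['pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp', 'pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu']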
--------------------------------------------------------------------------------
/tools/cfgs/dataset_configs/kitti_dataset.yaml:
--------------------------------------------------------------------------------
1 | DATASET: 'KittiDataset'
2 | DATA_PATH: '../data/kitti'
3 |
4 | POINT_CLOUD_RANGE: [0, -40, -3, 70.4, 40, 1]
5 |
6 | DATA_SPLIT: {
7 | 'train': train,
8 | 'test': val
9 | }
10 |
11 | INFO_PATH: {
12 | 'train': [kitti_infos_train.pkl],
13 | 'test': [kitti_infos_val.pkl],
14 | }
15 |
16 | FOV_POINTS_ONLY: True
17 |
18 |
19 | DATA_AUGMENTOR:
20 | DISABLE_AUG_LIST: ['placeholder']
21 | AUG_CONFIG_LIST:
22 | - NAME: gt_sampling
23 | USE_ROAD_PLANE: True
24 | DB_INFO_PATH:
25 | - kitti_dbinfos_train.pkl
26 | PREPARE: {
27 | filter_by_min_points: ['Car:5', 'Pedestrian:5', 'Cyclist:5'],
28 | filter_by_difficulty: [-1],
29 | }
30 |
31 | SAMPLE_GROUPS: ['Car:20','Pedestrian:15', 'Cyclist:15']
32 | NUM_POINT_FEATURES: 4
33 | DATABASE_WITH_FAKELIDAR: False
34 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0]
35 | LIMIT_WHOLE_SCENE: True
36 |
37 | - NAME: random_world_flip
38 | ALONG_AXIS_LIST: ['x']
39 |
40 | - NAME: random_world_rotation
41 | WORLD_ROT_ANGLE: [-0.78539816, 0.78539816]
42 |
43 | - NAME: random_world_scaling
44 | WORLD_SCALE_RANGE: [0.95, 1.05]
45 |
46 |
47 | POINT_FEATURE_ENCODING: {
48 | encoding_type: absolute_coordinates_encoding,
49 | used_feature_list: ['x', 'y', 'z', 'intensity'],
50 | src_feature_list: ['x', 'y', 'z', 'intensity'],
51 | }
52 |
53 |
54 | DATA_PROCESSOR:
55 | - NAME: mask_points_and_boxes_outside_range
56 | REMOVE_OUTSIDE_BOXES: True
57 |
58 | - NAME: shuffle_points
59 | SHUFFLE_ENABLED: {
60 | 'train': True,
61 | 'test': False
62 | }
63 |
64 | - NAME: transform_points_to_voxels
65 | VOXEL_SIZE: [0.05, 0.05, 0.1]
66 | MAX_POINTS_PER_VOXEL: 5
67 | MAX_NUMBER_OF_VOXELS: {
68 | 'train': 16000,
69 | 'test': 40000
70 | }
71 |
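A back-of-the-envelope check (illustrative; this mirrors how the data processor derives the voxel grid from POINT_CLOUD_RANGE and VOXEL_SIZE above):

import numpy as np

pc_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float64)
voxel_size = np.array([0.05, 0.05, 0.1])
grid_size = np.round((pc_range[3:6] - pc_range[0:3]) / voxel_size).astype(np.int64)
print(grid_size)  # [1408 1600   40] -> a 1408 x 1600 x 40 voxel grid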
--------------------------------------------------------------------------------
/tools/cfgs/dataset_configs/nuscenes_dataset.yaml:
--------------------------------------------------------------------------------
1 | DATASET: 'NuScenesDataset'
2 | DATA_PATH: '../data/nuscenes'
3 |
4 | VERSION: 'v1.0-trainval'
5 | MAX_SWEEPS: 10
6 | PRED_VELOCITY: True
7 | SET_NAN_VELOCITY_TO_ZEROS: True
8 | FILTER_MIN_POINTS_IN_GT: 1
9 |
10 | DATA_SPLIT: {
11 | 'train': train,
12 | 'test': val
13 | }
14 |
15 | INFO_PATH: {
16 | 'train': [nuscenes_infos_10sweeps_train.pkl],
17 | 'test': [nuscenes_infos_10sweeps_val.pkl],
18 | }
19 |
20 | POINT_CLOUD_RANGE: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
21 |
22 | BALANCED_RESAMPLING: True
23 |
24 | DATA_AUGMENTOR:
25 | DISABLE_AUG_LIST: ['placeholder']
26 | AUG_CONFIG_LIST:
27 | - NAME: gt_sampling
28 | DB_INFO_PATH:
29 | - nuscenes_dbinfos_10sweeps_withvelo.pkl
30 | PREPARE: {
31 | filter_by_min_points: [
32 | 'car:5','truck:5', 'construction_vehicle:5', 'bus:5', 'trailer:5',
33 | 'barrier:5', 'motorcycle:5', 'bicycle:5', 'pedestrian:5', 'traffic_cone:5'
34 | ],
35 | }
36 |
37 | SAMPLE_GROUPS: [
38 | 'car:2','truck:3', 'construction_vehicle:7', 'bus:4', 'trailer:6',
39 | 'barrier:2', 'motorcycle:6', 'bicycle:6', 'pedestrian:2', 'traffic_cone:2'
40 | ]
41 |
42 | NUM_POINT_FEATURES: 5
43 | DATABASE_WITH_FAKELIDAR: False
44 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0]
45 | LIMIT_WHOLE_SCENE: True
46 |
47 | - NAME: random_world_flip
48 | ALONG_AXIS_LIST: ['x', 'y']
49 |
50 | - NAME: random_world_rotation
51 | WORLD_ROT_ANGLE: [-0.3925, 0.3925]
52 |
53 | - NAME: random_world_scaling
54 | WORLD_SCALE_RANGE: [0.95, 1.05]
55 |
56 |
57 | POINT_FEATURE_ENCODING: {
58 | encoding_type: absolute_coordinates_encoding,
59 | used_feature_list: ['x', 'y', 'z', 'intensity', 'timestamp'],
60 | src_feature_list: ['x', 'y', 'z', 'intensity', 'timestamp'],
61 | }
62 |
63 |
64 | DATA_PROCESSOR:
65 | - NAME: mask_points_and_boxes_outside_range
66 | REMOVE_OUTSIDE_BOXES: True
67 |
68 | - NAME: shuffle_points
69 | SHUFFLE_ENABLED: {
70 | 'train': True,
71 | 'test': True
72 | }
73 |
74 | - NAME: transform_points_to_voxels
75 | VOXEL_SIZE: [0.1, 0.1, 0.2]
76 | MAX_POINTS_PER_VOXEL: 10
77 | MAX_NUMBER_OF_VOXELS: {
78 | 'train': 60000,
79 | 'test': 60000
80 | }
81 |
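The same check for the nuScenes ranges above (illustrative): the grid is square in BEV, and each of the MAX_SWEEPS=10 aggregated sweeps contributes points whose fifth feature is the relative timestamp listed in POINT_FEATURE_ENCODING:

import numpy as np

pc_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0])
voxel_size = np.array([0.1, 0.1, 0.2])
print(np.round((pc_range[3:6] - pc_range[0:3]) / voxel_size).astype(np.int64))
# [1024 1024   40]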
--------------------------------------------------------------------------------
/tools/cfgs/dataset_configs/waymo_dataset.yaml:
--------------------------------------------------------------------------------
1 | DATASET: 'WaymoDataset'
2 | DATA_PATH: '../data/waymo'
3 | PROCESSED_DATA_TAG: 'waymo_processed_data'
4 |
5 | POINT_CLOUD_RANGE: [-75.2, -75.2, -2, 75.2, 75.2, 4]
6 |
7 | DATA_SPLIT: {
8 | 'train': train,
9 | 'test': val
10 | }
11 |
12 | SAMPLED_INTERVAL: {
13 | 'train': 5,
14 | 'test': 5
15 | }
16 |
17 | DATA_AUGMENTOR:
18 | DISABLE_AUG_LIST: ['placeholder']
19 | AUG_CONFIG_LIST:
20 | - NAME: gt_sampling
21 | USE_ROAD_PLANE: False
22 | DB_INFO_PATH:
23 | - pcdet_waymo_dbinfos_train_sampled_10.pkl
24 | PREPARE: {
25 | filter_by_min_points: ['Vehicle:5', 'Pedestrian:5', 'Cyclist:5'],
26 | filter_by_difficulty: [-1],
27 | }
28 |
29 | SAMPLE_GROUPS: ['Vehicle:15', 'Pedestrian:10', 'Cyclist:10']
30 | NUM_POINT_FEATURES: 5
31 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0]
32 | LIMIT_WHOLE_SCENE: True
33 |
34 | - NAME: random_world_flip
35 | ALONG_AXIS_LIST: ['x', 'y']
36 |
37 | - NAME: random_world_rotation
38 | WORLD_ROT_ANGLE: [-0.78539816, 0.78539816]
39 |
40 | - NAME: random_world_scaling
41 | WORLD_SCALE_RANGE: [0.95, 1.05]
42 |
43 |
44 | POINT_FEATURE_ENCODING: {
45 | encoding_type: absolute_coordinates_encoding,
46 | used_feature_list: ['x', 'y', 'z', 'intensity', 'elongation'],
47 | src_feature_list: ['x', 'y', 'z', 'intensity', 'elongation'],
48 | }
49 |
50 |
51 | DATA_PROCESSOR:
52 | - NAME: mask_points_and_boxes_outside_range
53 | REMOVE_OUTSIDE_BOXES: True
54 |
55 | - NAME: shuffle_points
56 | SHUFFLE_ENABLED: {
57 | 'train': True,
58 | 'test': True
59 | }
60 |
61 | - NAME: transform_points_to_voxels
62 | VOXEL_SIZE: [0.1, 0.1, 0.15]
63 | MAX_POINTS_PER_VOXEL: 5
64 | MAX_NUMBER_OF_VOXELS: {
65 | 'train': 80000,
66 | 'test': 90000
67 | }
68 |
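One more illustrative check, tying this file to cfgs/waymo_models/second.yaml further below: 0.1 m voxels over the [-75.2, 75.2] range give a 1504 x 1504 BEV grid, which the 8x backbone (feature_map_stride: 8) reduces to a 188 x 188 anchor map:

print(round(150.4 / 0.1), round(6 / 0.15), 1504 // 8)  # 1504 40 188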
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/PartA2_free.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
5 |
6 |
7 | MODEL:
8 | NAME: PointRCNN
9 |
10 | VFE:
11 | NAME: MeanVFE
12 |
13 | BACKBONE_3D:
14 | NAME: UNetV2
15 | RETURN_ENCODED_TENSOR: False
16 |
17 | POINT_HEAD:
18 | NAME: PointIntraPartOffsetHead
19 | CLS_FC: [128, 128]
20 | PART_FC: [128, 128]
21 | REG_FC: [128, 128]
22 | CLASS_AGNOSTIC: False
23 | USE_POINT_FEATURES_BEFORE_FUSION: False
24 | TARGET_CONFIG:
25 | GT_EXTRA_WIDTH: [0.2, 0.2, 0.2]
26 | BOX_CODER: PointResidualCoder
27 | BOX_CODER_CONFIG: {
28 | 'use_mean_size': True,
29 | 'mean_size': [
30 | [3.9, 1.6, 1.56],
31 | [0.8, 0.6, 1.73],
32 | [1.76, 0.6, 1.73]
33 | ]
34 | }
35 |
36 | LOSS_CONFIG:
37 | LOSS_REG: WeightedSmoothL1Loss
38 | LOSS_WEIGHTS: {
39 | 'point_cls_weight': 1.0,
40 | 'point_box_weight': 1.0,
41 | 'point_part_weight': 1.0,
42 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
43 | }
44 |
45 | ROI_HEAD:
46 | NAME: PartA2FCHead
47 | CLASS_AGNOSTIC: True
48 |
49 | SHARED_FC: [256, 256, 256]
50 | CLS_FC: [256, 256]
51 | REG_FC: [256, 256]
52 | DP_RATIO: 0.3
53 | DISABLE_PART: True
54 | SEG_MASK_SCORE_THRESH: 0.0
55 |
56 | NMS_CONFIG:
57 | TRAIN:
58 | NMS_TYPE: nms_gpu
59 | MULTI_CLASSES_NMS: False
60 | NMS_PRE_MAXSIZE: 9000
61 | NMS_POST_MAXSIZE: 512
62 | NMS_THRESH: 0.8
63 | TEST:
64 | NMS_TYPE: nms_gpu
65 | MULTI_CLASSES_NMS: False
66 | NMS_PRE_MAXSIZE: 9000
67 | NMS_POST_MAXSIZE: 100
68 | NMS_THRESH: 0.85
69 |
70 | ROI_AWARE_POOL:
71 | POOL_SIZE: 12
72 | NUM_FEATURES: 128
73 | MAX_POINTS_PER_VOXEL: 128
74 |
75 | TARGET_CONFIG:
76 | BOX_CODER: ResidualCoder
77 | ROI_PER_IMAGE: 128
78 | FG_RATIO: 0.5
79 |
80 | SAMPLE_ROI_BY_EACH_CLASS: True
81 | CLS_SCORE_TYPE: roi_iou
82 |
83 | CLS_FG_THRESH: 0.75
84 | CLS_BG_THRESH: 0.25
85 | CLS_BG_THRESH_LO: 0.1
86 | HARD_BG_RATIO: 0.8
87 |
88 | REG_FG_THRESH: 0.65
89 |
90 | LOSS_CONFIG:
91 | CLS_LOSS: BinaryCrossEntropy
92 | REG_LOSS: smooth-l1
93 | CORNER_LOSS_REGULARIZATION: True
94 | LOSS_WEIGHTS: {
95 | 'rcnn_cls_weight': 1.0,
96 | 'rcnn_reg_weight': 1.0,
97 | 'rcnn_corner_weight': 1.0,
98 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
99 | }
100 |
101 | POST_PROCESSING:
102 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
103 | SCORE_THRESH: 0.1
104 | OUTPUT_RAW_SCORE: False
105 |
106 | EVAL_METRIC: kitti
107 |
108 | NMS_CONFIG:
109 | MULTI_CLASSES_NMS: False
110 | NMS_TYPE: nms_gpu
111 | NMS_THRESH: 0.1
112 | NMS_PRE_MAXSIZE: 4096
113 | NMS_POST_MAXSIZE: 500
114 |
115 |
116 | OPTIMIZATION:
117 | BATCH_SIZE_PER_GPU: 4
118 | NUM_EPOCHS: 80
119 |
120 | OPTIMIZER: adam_onecycle
121 | LR: 0.003
122 | WEIGHT_DECAY: 0.01
123 | MOMENTUM: 0.9
124 |
125 | MOMS: [0.95, 0.85]
126 | PCT_START: 0.4
127 | DIV_FACTOR: 10
128 | DECAY_STEP_LIST: [35, 45]
129 | LR_DECAY: 0.1
130 | LR_CLIP: 0.0000001
131 |
132 | LR_WARMUP: False
133 | WARMUP_EPOCH: 1
134 |
135 | GRAD_NORM_CLIP: 10
136 |
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/pointpillar.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
5 | POINT_CLOUD_RANGE: [0, -39.68, -3, 69.12, 39.68, 1]
6 | DATA_PROCESSOR:
7 | - NAME: mask_points_and_boxes_outside_range
8 | REMOVE_OUTSIDE_BOXES: True
9 |
10 | - NAME: shuffle_points
11 | SHUFFLE_ENABLED: {
12 | 'train': True,
13 | 'test': False
14 | }
15 |
16 | - NAME: transform_points_to_voxels
17 | VOXEL_SIZE: [0.16, 0.16, 4]
18 | MAX_POINTS_PER_VOXEL: 32
19 | MAX_NUMBER_OF_VOXELS: {
20 | 'train': 16000,
21 | 'test': 40000
22 | }
23 | DATA_AUGMENTOR:
24 | DISABLE_AUG_LIST: ['placeholder']
25 | AUG_CONFIG_LIST:
26 | - NAME: gt_sampling
27 | USE_ROAD_PLANE: True
28 | DB_INFO_PATH:
29 | - kitti_dbinfos_train.pkl
30 | PREPARE: {
31 | filter_by_min_points: ['Car:5', 'Pedestrian:5', 'Cyclist:5'],
32 | filter_by_difficulty: [-1],
33 | }
34 |
35 | SAMPLE_GROUPS: ['Car:15','Pedestrian:15', 'Cyclist:15']
36 | NUM_POINT_FEATURES: 4
37 | DATABASE_WITH_FAKELIDAR: False
38 | REMOVE_EXTRA_WIDTH: [0.0, 0.0, 0.0]
39 | LIMIT_WHOLE_SCENE: False
40 |
41 | - NAME: random_world_flip
42 | ALONG_AXIS_LIST: ['x']
43 |
44 | - NAME: random_world_rotation
45 | WORLD_ROT_ANGLE: [-0.78539816, 0.78539816]
46 |
47 | - NAME: random_world_scaling
48 | WORLD_SCALE_RANGE: [0.95, 1.05]
49 |
50 | MODEL:
51 | NAME: PointPillar
52 |
53 | VFE:
54 | NAME: PillarVFE
55 | WITH_DISTANCE: False
56 | USE_ABSLOTE_XYZ: True
57 | USE_NORM: True
58 | NUM_FILTERS: [64]
59 |
60 | MAP_TO_BEV:
61 | NAME: PointPillarScatter
62 | NUM_BEV_FEATURES: 64
63 |
64 | BACKBONE_2D:
65 | NAME: BaseBEVBackbone
66 | LAYER_NUMS: [3, 5, 5]
67 | LAYER_STRIDES: [2, 2, 2]
68 | NUM_FILTERS: [64, 128, 256]
69 | UPSAMPLE_STRIDES: [1, 2, 4]
70 | NUM_UPSAMPLE_FILTERS: [128, 128, 128]
71 |
72 | DENSE_HEAD:
73 | NAME: AnchorHeadSingle
74 | CLASS_AGNOSTIC: False
75 |
76 | USE_DIRECTION_CLASSIFIER: True
77 | DIR_OFFSET: 0.78539
78 | DIR_LIMIT_OFFSET: 0.0
79 | NUM_DIR_BINS: 2
80 |
81 | ANCHOR_GENERATOR_CONFIG: [
82 | {
83 | 'class_name': 'Car',
84 | 'anchor_sizes': [[3.9, 1.6, 1.56]],
85 | 'anchor_rotations': [0, 1.57],
86 | 'anchor_bottom_heights': [-1.78],
87 | 'align_center': False,
88 | 'feature_map_stride': 2,
89 | 'matched_threshold': 0.6,
90 | 'unmatched_threshold': 0.45
91 | },
92 | {
93 | 'class_name': 'Pedestrian',
94 | 'anchor_sizes': [[0.8, 0.6, 1.73]],
95 | 'anchor_rotations': [0, 1.57],
96 | 'anchor_bottom_heights': [-0.6],
97 | 'align_center': False,
98 | 'feature_map_stride': 2,
99 | 'matched_threshold': 0.5,
100 | 'unmatched_threshold': 0.35
101 | },
102 | {
103 | 'class_name': 'Cyclist',
104 | 'anchor_sizes': [[1.76, 0.6, 1.73]],
105 | 'anchor_rotations': [0, 1.57],
106 | 'anchor_bottom_heights': [-0.6],
107 | 'align_center': False,
108 | 'feature_map_stride': 2,
109 | 'matched_threshold': 0.5,
110 | 'unmatched_threshold': 0.35
111 | }
112 | ]
113 |
114 | TARGET_ASSIGNER_CONFIG:
115 | NAME: AxisAlignedTargetAssigner
116 | POS_FRACTION: -1.0
117 | SAMPLE_SIZE: 512
118 | NORM_BY_NUM_EXAMPLES: False
119 | MATCH_HEIGHT: False
120 | BOX_CODER: ResidualCoder
121 |
122 | LOSS_CONFIG:
123 | LOSS_WEIGHTS: {
124 | 'cls_weight': 1.0,
125 | 'loc_weight': 2.0,
126 | 'dir_weight': 0.2,
127 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
128 | }
129 |
130 | POST_PROCESSING:
131 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
132 | SCORE_THRESH: 0.1
133 | OUTPUT_RAW_SCORE: False
134 |
135 | EVAL_METRIC: kitti
136 |
137 | NMS_CONFIG:
138 | MULTI_CLASSES_NMS: False
139 | NMS_TYPE: nms_gpu
140 | NMS_THRESH: 0.01
141 | NMS_PRE_MAXSIZE: 4096
142 | NMS_POST_MAXSIZE: 500
143 |
144 |
145 | OPTIMIZATION:
146 | BATCH_SIZE_PER_GPU: 4
147 | NUM_EPOCHS: 80
148 |
149 | OPTIMIZER: adam_onecycle
150 | LR: 0.003
151 | WEIGHT_DECAY: 0.01
152 | MOMENTUM: 0.9
153 |
154 | MOMS: [0.95, 0.85]
155 | PCT_START: 0.4
156 | DIV_FACTOR: 10
157 | DECAY_STEP_LIST: [35, 45]
158 | LR_DECAY: 0.1
159 | LR_CLIP: 0.0000001
160 |
161 | LR_WARMUP: False
162 | WARMUP_EPOCH: 1
163 |
164 | GRAD_NORM_CLIP: 10
165 |
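A worked check (illustrative) of the anchor map implied by the PointPillars settings above; note the z voxel size of 4 covers the full -3..1 range, so there is a single pillar layer:

# Pillar grid: 69.12 / 0.16 = 432 cells in x, 79.36 / 0.16 = 496 in y.
# With feature_map_stride: 2 the anchors live on a 216 x 248 map,
# with 2 rotations per class at every cell.
print(round(69.12 / 0.16), round(79.36 / 0.16), 432 // 2, 496 // 2)  # 432 496 216 248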
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/pointrcnn.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
5 |
6 | DATA_PROCESSOR:
7 | - NAME: mask_points_and_boxes_outside_range
8 | REMOVE_OUTSIDE_BOXES: True
9 |
10 | - NAME: sample_points
11 | NUM_POINTS: {
12 | 'train': 16384,
13 | 'test': 16384
14 | }
15 |
16 | - NAME: shuffle_points
17 | SHUFFLE_ENABLED: {
18 | 'train': True,
19 | 'test': False
20 | }
21 |
22 | MODEL:
23 | NAME: PointRCNN
24 |
25 | BACKBONE_3D:
26 | NAME: PointNet2MSG
27 | SA_CONFIG:
28 | NPOINTS: [4096, 1024, 256, 64]
29 | RADIUS: [[0.1, 0.5], [0.5, 1.0], [1.0, 2.0], [2.0, 4.0]]
30 | NSAMPLE: [[16, 32], [16, 32], [16, 32], [16, 32]]
31 | MLPS: [[[16, 16, 32], [32, 32, 64]],
32 | [[64, 64, 128], [64, 96, 128]],
33 | [[128, 196, 256], [128, 196, 256]],
34 | [[256, 256, 512], [256, 384, 512]]]
35 | FP_MLPS: [[128, 128], [256, 256], [512, 512], [512, 512]]
36 |
37 | POINT_HEAD:
38 | NAME: PointHeadBox
39 | CLS_FC: [256, 256]
40 | REG_FC: [256, 256]
41 | CLASS_AGNOSTIC: False
42 | USE_POINT_FEATURES_BEFORE_FUSION: False
43 | TARGET_CONFIG:
44 | GT_EXTRA_WIDTH: [0.2, 0.2, 0.2]
45 | BOX_CODER: PointResidualCoder
46 | BOX_CODER_CONFIG: {
47 | 'use_mean_size': True,
48 | 'mean_size': [
49 | [3.9, 1.6, 1.56],
50 | [0.8, 0.6, 1.73],
51 | [1.76, 0.6, 1.73]
52 | ]
53 | }
54 |
55 | LOSS_CONFIG:
56 | LOSS_REG: WeightedSmoothL1Loss
57 | LOSS_WEIGHTS: {
58 | 'point_cls_weight': 1.0,
59 | 'point_box_weight': 1.0,
60 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
61 | }
62 |
63 | ROI_HEAD:
64 | NAME: PointRCNNHead
65 | CLASS_AGNOSTIC: True
66 |
67 | ROI_POINT_POOL:
68 | POOL_EXTRA_WIDTH: [0.0, 0.0, 0.0]
69 | NUM_SAMPLED_POINTS: 512
70 | DEPTH_NORMALIZER: 70.0
71 |
72 | XYZ_UP_LAYER: [128, 128]
73 | CLS_FC: [256, 256]
74 | REG_FC: [256, 256]
75 | DP_RATIO: 0.0
76 | USE_BN: False
77 |
78 | SA_CONFIG:
79 | NPOINTS: [128, 32, -1]
80 | RADIUS: [0.2, 0.4, 100]
81 | NSAMPLE: [16, 16, 16]
82 | MLPS: [[128, 128, 128],
83 | [128, 128, 256],
84 | [256, 256, 512]]
85 |
86 | NMS_CONFIG:
87 | TRAIN:
88 | NMS_TYPE: nms_gpu
89 | MULTI_CLASSES_NMS: False
90 | NMS_PRE_MAXSIZE: 9000
91 | NMS_POST_MAXSIZE: 512
92 | NMS_THRESH: 0.8
93 | TEST:
94 | NMS_TYPE: nms_gpu
95 | MULTI_CLASSES_NMS: False
96 | NMS_PRE_MAXSIZE: 9000
97 | NMS_POST_MAXSIZE: 100
98 | NMS_THRESH: 0.85
99 |
100 | TARGET_CONFIG:
101 | BOX_CODER: ResidualCoder
102 | ROI_PER_IMAGE: 128
103 | FG_RATIO: 0.5
104 |
105 | SAMPLE_ROI_BY_EACH_CLASS: True
106 | CLS_SCORE_TYPE: cls
107 |
108 | CLS_FG_THRESH: 0.6
109 | CLS_BG_THRESH: 0.45
110 | CLS_BG_THRESH_LO: 0.1
111 | HARD_BG_RATIO: 0.8
112 |
113 | REG_FG_THRESH: 0.55
114 |
115 | LOSS_CONFIG:
116 | CLS_LOSS: BinaryCrossEntropy
117 | REG_LOSS: smooth-l1
118 | CORNER_LOSS_REGULARIZATION: True
119 | LOSS_WEIGHTS: {
120 | 'rcnn_cls_weight': 1.0,
121 | 'rcnn_reg_weight': 1.0,
122 | 'rcnn_corner_weight': 1.0,
123 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
124 | }
125 |
126 | POST_PROCESSING:
127 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
128 | SCORE_THRESH: 0.1
129 | OUTPUT_RAW_SCORE: False
130 |
131 | EVAL_METRIC: kitti
132 |
133 | NMS_CONFIG:
134 | MULTI_CLASSES_NMS: False
135 | NMS_TYPE: nms_gpu
136 | NMS_THRESH: 0.1
137 | NMS_PRE_MAXSIZE: 4096
138 | NMS_POST_MAXSIZE: 500
139 |
140 |
141 | OPTIMIZATION:
142 | BATCH_SIZE_PER_GPU: 2
143 | NUM_EPOCHS: 80
144 |
145 | OPTIMIZER: adam_onecycle
146 | LR: 0.01
147 | WEIGHT_DECAY: 0.01
148 | MOMENTUM: 0.9
149 |
150 | MOMS: [0.95, 0.85]
151 | PCT_START: 0.4
152 | DIV_FACTOR: 10
153 | DECAY_STEP_LIST: [35, 45]
154 | LR_DECAY: 0.1
155 | LR_CLIP: 0.0000001
156 |
157 | LR_WARMUP: False
158 | WARMUP_EPOCH: 1
159 |
160 | GRAD_NORM_CLIP: 10
161 |
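Reading the backbone cascade above (illustrative): sample_points fixes every cloud to 16384 points, and each set-abstraction level keeps a quarter of them:

npoints = [16384, 4096, 1024, 256, 64]
for prev, cur in zip(npoints, npoints[1:]):
    print(f'{prev} -> {cur} (x{prev // cur})')  # a uniform x4 downsampling cascade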
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/pointrcnn_iou.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
5 |
6 | DATA_PROCESSOR:
7 | - NAME: mask_points_and_boxes_outside_range
8 | REMOVE_OUTSIDE_BOXES: True
9 |
10 | - NAME: sample_points
11 | NUM_POINTS: {
12 | 'train': 16384,
13 | 'test': 16384
14 | }
15 |
16 | - NAME: shuffle_points
17 | SHUFFLE_ENABLED: {
18 | 'train': True,
19 | 'test': False
20 | }
21 |
22 | MODEL:
23 | NAME: PointRCNN
24 |
25 | BACKBONE_3D:
26 | NAME: PointNet2MSG
27 | SA_CONFIG:
28 | NPOINTS: [4096, 1024, 256, 64]
29 | RADIUS: [[0.1, 0.5], [0.5, 1.0], [1.0, 2.0], [2.0, 4.0]]
30 | NSAMPLE: [[16, 32], [16, 32], [16, 32], [16, 32]]
31 | MLPS: [[[16, 16, 32], [32, 32, 64]],
32 | [[64, 64, 128], [64, 96, 128]],
33 | [[128, 196, 256], [128, 196, 256]],
34 | [[256, 256, 512], [256, 384, 512]]]
35 | FP_MLPS: [[128, 128], [256, 256], [512, 512], [512, 512]]
36 |
37 | POINT_HEAD:
38 | NAME: PointHeadBox
39 | CLS_FC: [256, 256]
40 | REG_FC: [256, 256]
41 | CLASS_AGNOSTIC: False
42 | USE_POINT_FEATURES_BEFORE_FUSION: False
43 | TARGET_CONFIG:
44 | GT_EXTRA_WIDTH: [0.2, 0.2, 0.2]
45 | BOX_CODER: PointResidualCoder
46 | BOX_CODER_CONFIG: {
47 | 'use_mean_size': True,
48 | 'mean_size': [
49 | [3.9, 1.6, 1.56],
50 | [0.8, 0.6, 1.73],
51 | [1.76, 0.6, 1.73]
52 | ]
53 | }
54 |
55 | LOSS_CONFIG:
56 | LOSS_REG: WeightedSmoothL1Loss
57 | LOSS_WEIGHTS: {
58 | 'point_cls_weight': 1.0,
59 | 'point_box_weight': 1.0,
60 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
61 | }
62 |
63 | ROI_HEAD:
64 | NAME: PointRCNNHead
65 | CLASS_AGNOSTIC: True
66 |
67 | ROI_POINT_POOL:
68 | POOL_EXTRA_WIDTH: [0.0, 0.0, 0.0]
69 | NUM_SAMPLED_POINTS: 512
70 | DEPTH_NORMALIZER: 70.0
71 |
72 | XYZ_UP_LAYER: [128, 128]
73 | CLS_FC: [256, 256]
74 | REG_FC: [256, 256]
75 | DP_RATIO: 0.0
76 | USE_BN: False
77 |
78 | SA_CONFIG:
79 | NPOINTS: [128, 32, -1]
80 | RADIUS: [0.2, 0.4, 100]
81 | NSAMPLE: [16, 16, 16]
82 | MLPS: [[128, 128, 128],
83 | [128, 128, 256],
84 | [256, 256, 512]]
85 |
86 | NMS_CONFIG:
87 | TRAIN:
88 | NMS_TYPE: nms_gpu
89 | MULTI_CLASSES_NMS: False
90 | NMS_PRE_MAXSIZE: 9000
91 | NMS_POST_MAXSIZE: 512
92 | NMS_THRESH: 0.8
93 | TEST:
94 | NMS_TYPE: nms_gpu
95 | MULTI_CLASSES_NMS: False
96 | NMS_PRE_MAXSIZE: 9000
97 | NMS_POST_MAXSIZE: 100
98 | NMS_THRESH: 0.85
99 |
100 | TARGET_CONFIG:
101 | BOX_CODER: ResidualCoder
102 | ROI_PER_IMAGE: 128
103 | FG_RATIO: 0.5
104 |
105 | SAMPLE_ROI_BY_EACH_CLASS: True
106 | CLS_SCORE_TYPE: roi_iou
107 |
108 | CLS_FG_THRESH: 0.7
109 | CLS_BG_THRESH: 0.25
110 | CLS_BG_THRESH_LO: 0.1
111 | HARD_BG_RATIO: 0.8
112 |
113 | REG_FG_THRESH: 0.55
114 |
115 | LOSS_CONFIG:
116 | CLS_LOSS: BinaryCrossEntropy
117 | REG_LOSS: smooth-l1
118 | CORNER_LOSS_REGULARIZATION: True
119 | LOSS_WEIGHTS: {
120 | 'rcnn_cls_weight': 1.0,
121 | 'rcnn_reg_weight': 1.0,
122 | 'rcnn_corner_weight': 1.0,
123 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
124 | }
125 |
126 | POST_PROCESSING:
127 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
128 | SCORE_THRESH: 0.1
129 | OUTPUT_RAW_SCORE: False
130 |
131 | EVAL_METRIC: kitti
132 |
133 | NMS_CONFIG:
134 | MULTI_CLASSES_NMS: False
135 | NMS_TYPE: nms_gpu
136 | NMS_THRESH: 0.1
137 | NMS_PRE_MAXSIZE: 4096
138 | NMS_POST_MAXSIZE: 500
139 |
140 |
141 | OPTIMIZATION:
142 | BATCH_SIZE_PER_GPU: 3
143 | NUM_EPOCHS: 80
144 |
145 | OPTIMIZER: adam_onecycle
146 | LR: 0.01
147 | WEIGHT_DECAY: 0.01
148 | MOMENTUM: 0.9
149 |
150 | MOMS: [0.95, 0.85]
151 | PCT_START: 0.4
152 | DIV_FACTOR: 10
153 | DECAY_STEP_LIST: [35, 45]
154 | LR_DECAY: 0.1
155 | LR_CLIP: 0.0000001
156 |
157 | LR_WARMUP: False
158 | WARMUP_EPOCH: 1
159 |
160 | GRAD_NORM_CLIP: 10
161 |
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/second.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
5 |
6 |
7 | MODEL:
8 | NAME: SECONDNet
9 |
10 | VFE:
11 | NAME: MeanVFE
12 |
13 | BACKBONE_3D:
14 | NAME: VoxelBackBone8x
15 |
16 | MAP_TO_BEV:
17 | NAME: HeightCompression
18 | NUM_BEV_FEATURES: 256
19 |
20 | BACKBONE_2D:
21 | NAME: BaseBEVBackbone
22 |         # one upsampling block for every downsampling block
23 |         LAYER_NUMS: [5, 5]  # number of conv2d layers in each block
24 |         LAYER_STRIDES: [1, 2]  # block stride (2 halves the H/W of the map)
25 |         NUM_FILTERS: [128, 256]  # downsampling channel counts
26 |         UPSAMPLE_STRIDES: [1, 2]  # upsampling factors
27 |         NUM_UPSAMPLE_FILTERS: [256, 256]  # upsampling channel counts
28 |
29 | DENSE_HEAD:
30 | NAME: AnchorHeadSingle
31 | CLASS_AGNOSTIC: False
32 |
33 | USE_DIRECTION_CLASSIFIER: True
34 | DIR_OFFSET: 0.78539
35 | DIR_LIMIT_OFFSET: 0.0
36 | NUM_DIR_BINS: 2
37 |
38 | ANCHOR_GENERATOR_CONFIG: [
39 | {
40 | 'class_name': 'Car',
41 | 'anchor_sizes': [[3.9, 1.6, 1.56]],
42 | 'anchor_rotations': [0, 1.57],
43 | 'anchor_bottom_heights': [-1.78],
44 | 'align_center': False,
45 | 'feature_map_stride': 8,
46 | 'matched_threshold': 0.6,
47 | 'unmatched_threshold': 0.45
48 | },
49 | {
50 | 'class_name': 'Pedestrian',
51 | 'anchor_sizes': [[0.8, 0.6, 1.73]],
52 | 'anchor_rotations': [0, 1.57],
53 | 'anchor_bottom_heights': [-0.6],
54 | 'align_center': False,
55 | 'feature_map_stride': 8,
56 | 'matched_threshold': 0.5,
57 | 'unmatched_threshold': 0.35
58 | },
59 | {
60 | 'class_name': 'Cyclist',
61 | 'anchor_sizes': [[1.76, 0.6, 1.73]],
62 | 'anchor_rotations': [0, 1.57],
63 | 'anchor_bottom_heights': [-0.6],
64 | 'align_center': False,
65 | 'feature_map_stride': 8,
66 | 'matched_threshold': 0.5,
67 | 'unmatched_threshold': 0.35
68 | }
69 | ]
70 |
71 | TARGET_ASSIGNER_CONFIG:
72 | NAME: AxisAlignedTargetAssigner
73 | POS_FRACTION: -1.0
74 | SAMPLE_SIZE: 512
75 | NORM_BY_NUM_EXAMPLES: False
76 | MATCH_HEIGHT: False
77 | BOX_CODER: ResidualCoder
78 |
79 | LOSS_CONFIG:
80 | LOSS_WEIGHTS: {
81 | 'cls_weight': 1.0,
82 | 'loc_weight': 2.0,
83 | 'dir_weight': 0.2,
84 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
85 | }
86 |
87 | POST_PROCESSING:
88 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
89 | SCORE_THRESH: 0.1
90 | OUTPUT_RAW_SCORE: False
91 |
92 | EVAL_METRIC: kitti
93 |
94 | NMS_CONFIG:
95 | MULTI_CLASSES_NMS: False
96 | NMS_TYPE: nms_gpu
97 | NMS_THRESH: 0.01
98 | NMS_PRE_MAXSIZE: 4096
99 | NMS_POST_MAXSIZE: 500
100 |
101 |
102 | OPTIMIZATION:
103 | BATCH_SIZE_PER_GPU: 4
104 | NUM_EPOCHS: 80
105 |
106 | OPTIMIZER: adam_onecycle
107 | LR: 0.003
108 | WEIGHT_DECAY: 0.01
109 | MOMENTUM: 0.9
110 |
111 | MOMS: [0.95, 0.85]
112 | PCT_START: 0.4
113 | DIV_FACTOR: 10
114 | DECAY_STEP_LIST: [35, 45]
115 | LR_DECAY: 0.1
116 | LR_CLIP: 0.0000001
117 |
118 | LR_WARMUP: False
119 | WARMUP_EPOCH: 1
120 |
121 | GRAD_NORM_CLIP: 10
122 |
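A shape walk-through for the backbone settings above (illustrative; it assumes the 1408 x 1600 x 40 KITTI grid from the dataset config, which the 8x sparse backbone reduces to a 200 x 176 BEV map before HeightCompression yields the 256 channels):

print(1600 // 8, 1408 // 8)  # 200 176: the BEV (H, W) entering BaseBEVBackbone
# block 1: 5 convs, stride 1, 128 filters -> (128, 200, 176); deblock x1 -> (256, 200, 176)
# block 2: 5 convs, stride 2, 256 filters -> (256, 100,  88); deblock x2 -> (256, 200, 176)
# concatenated output: (512, 200, 176), consumed by AnchorHeadSingle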
--------------------------------------------------------------------------------
/tools/cfgs/kitti_models/second_multihead.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Car', 'Pedestrian', 'Cyclist']
2 | DATA_CONFIG:
3 | _BASE_CONFIG_: cfgs/dataset_configs/kitti_dataset.yaml
4 |
5 |
6 | MODEL:
7 | NAME: SECONDNet
8 |
9 | VFE:
10 | NAME: MeanVFE
11 |
12 | BACKBONE_3D:
13 | NAME: VoxelBackBone8x
14 |
15 | MAP_TO_BEV:
16 | NAME: HeightCompression
17 | NUM_BEV_FEATURES: 256
18 |
19 | BACKBONE_2D:
20 | NAME: BaseBEVBackbone
21 |
22 | LAYER_NUMS: [5, 5]
23 | LAYER_STRIDES: [1, 2]
24 | NUM_FILTERS: [128, 256]
25 | UPSAMPLE_STRIDES: [1, 2]
26 | NUM_UPSAMPLE_FILTERS: [256, 256]
27 |
28 | DENSE_HEAD:
29 | NAME: AnchorHeadMulti
30 | CLASS_AGNOSTIC: False
31 |
32 | USE_DIRECTION_CLASSIFIER: True
33 | DIR_OFFSET: 0.78539
34 | DIR_LIMIT_OFFSET: 0.0
35 | NUM_DIR_BINS: 2
36 |
37 | USE_MULTIHEAD: True
38 | SEPARATE_MULTIHEAD: True
39 | ANCHOR_GENERATOR_CONFIG: [
40 | {
41 | 'class_name': 'Car',
42 | 'anchor_sizes': [[3.9, 1.6, 1.56]],
43 | 'anchor_rotations': [0, 1.57],
44 | 'anchor_bottom_heights': [-1.6],
45 | 'align_center': False,
46 | 'feature_map_stride': 8,
47 | 'matched_threshold': 0.6,
48 | 'unmatched_threshold': 0.45
49 | },
50 | {
51 | 'class_name': 'Pedestrian',
52 | 'anchor_sizes': [[0.8, 0.6, 1.73]],
53 | 'anchor_rotations': [0, 1.57],
54 | 'anchor_bottom_heights': [-1.6],
55 | 'align_center': False,
56 | 'feature_map_stride': 8,
57 | 'matched_threshold': 0.5,
58 | 'unmatched_threshold': 0.35
59 | },
60 | {
61 | 'class_name': 'Cyclist',
62 | 'anchor_sizes': [[1.76, 0.6, 1.73]],
63 | 'anchor_rotations': [0, 1.57],
64 | 'anchor_bottom_heights': [-1.6],
65 | 'align_center': False,
66 | 'feature_map_stride': 8,
67 | 'matched_threshold': 0.5,
68 | 'unmatched_threshold': 0.35
69 | }
70 | ]
71 |
72 | SHARED_CONV_NUM_FILTER: 64
73 |
74 | RPN_HEAD_CFGS: [
75 | {
76 | 'HEAD_CLS_NAME': ['Car'],
77 | },
78 | {
79 | 'HEAD_CLS_NAME': ['Pedestrian'],
80 | },
81 | {
82 | 'HEAD_CLS_NAME': ['Cyclist'],
83 | }
84 | ]
85 |
86 | TARGET_ASSIGNER_CONFIG:
87 | NAME: AxisAlignedTargetAssigner
88 | POS_FRACTION: -1.0
89 | SAMPLE_SIZE: 512
90 | NORM_BY_NUM_EXAMPLES: False
91 | MATCH_HEIGHT: False
92 | BOX_CODER: ResidualCoder
93 |
94 | LOSS_CONFIG:
95 | LOSS_WEIGHTS: {
96 | 'cls_weight': 1.0,
97 | 'loc_weight': 2.0,
98 | 'dir_weight': 0.2,
99 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
100 | }
101 |
102 | POST_PROCESSING:
103 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
104 | MULTI_CLASSES_NMS: True
105 | SCORE_THRESH: 0.1
106 | OUTPUT_RAW_SCORE: False
107 |
108 | EVAL_METRIC: kitti
109 |
110 | NMS_CONFIG:
111 | MULTI_CLASSES_NMS: True
112 | NMS_TYPE: nms_gpu
113 | NMS_THRESH: 0.1
114 | NMS_PRE_MAXSIZE: 4096
115 | NMS_POST_MAXSIZE: 500
116 |
117 |
118 | OPTIMIZATION:
119 | BATCH_SIZE_PER_GPU: 4
120 | NUM_EPOCHS: 80
121 |
122 | OPTIMIZER: adam_onecycle
123 | LR: 0.003
124 | WEIGHT_DECAY: 0.01
125 | MOMENTUM: 0.9
126 |
127 | MOMS: [0.95, 0.85]
128 | PCT_START: 0.4
129 | DIV_FACTOR: 10
130 | DECAY_STEP_LIST: [35, 45]
131 | LR_DECAY: 0.1
132 | LR_CLIP: 0.0000001
133 |
134 | LR_WARMUP: False
135 | WARMUP_EPOCH: 1
136 |
137 | GRAD_NORM_CLIP: 10
138 |
--------------------------------------------------------------------------------
/tools/cfgs/waymo_models/second.yaml:
--------------------------------------------------------------------------------
1 | CLASS_NAMES: ['Vehicle', 'Pedestrian', 'Cyclist']
2 |
3 | DATA_CONFIG:
4 | _BASE_CONFIG_: cfgs/dataset_configs/waymo_dataset.yaml
5 |
6 |
7 | MODEL:
8 | NAME: SECONDNet
9 |
10 | VFE:
11 | NAME: MeanVFE
12 |
13 | BACKBONE_3D:
14 | NAME: VoxelBackBone8x
15 |
16 | MAP_TO_BEV:
17 | NAME: HeightCompression
18 | NUM_BEV_FEATURES: 256
19 |
20 | BACKBONE_2D:
21 | NAME: BaseBEVBackbone
22 |
23 | LAYER_NUMS: [5, 5]
24 | LAYER_STRIDES: [1, 2]
25 | NUM_FILTERS: [128, 256]
26 | UPSAMPLE_STRIDES: [1, 2]
27 | NUM_UPSAMPLE_FILTERS: [256, 256]
28 |
29 | DENSE_HEAD:
30 | NAME: AnchorHeadSingle
31 | CLASS_AGNOSTIC: False
32 |
33 | USE_DIRECTION_CLASSIFIER: True
34 | DIR_OFFSET: 0.78539
35 | DIR_LIMIT_OFFSET: 0.0
36 | NUM_DIR_BINS: 2
37 |
38 | ANCHOR_GENERATOR_CONFIG: [
39 | {
40 | 'class_name': 'Vehicle',
41 | 'anchor_sizes': [[4.7, 2.1, 1.7]],
42 | 'anchor_rotations': [0, 1.57],
43 | 'anchor_bottom_heights': [0],
44 | 'align_center': False,
45 | 'feature_map_stride': 8,
46 | 'matched_threshold': 0.55,
47 | 'unmatched_threshold': 0.4
48 | },
49 | {
50 | 'class_name': 'Pedestrian',
51 | 'anchor_sizes': [[0.91, 0.86, 1.73]],
52 | 'anchor_rotations': [0, 1.57],
53 | 'anchor_bottom_heights': [0],
54 | 'align_center': False,
55 | 'feature_map_stride': 8,
56 | 'matched_threshold': 0.5,
57 | 'unmatched_threshold': 0.35
58 | },
59 | {
60 | 'class_name': 'Cyclist',
61 | 'anchor_sizes': [[1.78, 0.84, 1.78]],
62 | 'anchor_rotations': [0, 1.57],
63 | 'anchor_bottom_heights': [0],
64 | 'align_center': False,
65 | 'feature_map_stride': 8,
66 | 'matched_threshold': 0.5,
67 | 'unmatched_threshold': 0.35
68 | }
69 | ]
70 |
71 | TARGET_ASSIGNER_CONFIG:
72 | NAME: AxisAlignedTargetAssigner
73 | POS_FRACTION: -1.0
74 | SAMPLE_SIZE: 512
75 | NORM_BY_NUM_EXAMPLES: False
76 | MATCH_HEIGHT: False
77 | BOX_CODER: ResidualCoder
78 |
79 | LOSS_CONFIG:
80 | LOSS_WEIGHTS: {
81 | 'cls_weight': 1.0,
82 | 'loc_weight': 2.0,
83 | 'dir_weight': 0.2,
84 | 'code_weights': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
85 | }
86 |
87 | POST_PROCESSING:
88 | RECALL_THRESH_LIST: [0.3, 0.5, 0.7]
89 | SCORE_THRESH: 0.1
90 | OUTPUT_RAW_SCORE: False
91 |
92 | EVAL_METRIC: waymo
93 |
94 | NMS_CONFIG:
95 | MULTI_CLASSES_NMS: False
96 | NMS_TYPE: nms_gpu
97 | NMS_THRESH: 0.01
98 | NMS_PRE_MAXSIZE: 4096
99 | NMS_POST_MAXSIZE: 500
100 |
101 |
102 | OPTIMIZATION:
103 | BATCH_SIZE_PER_GPU: 4
104 | NUM_EPOCHS: 30
105 |
106 | OPTIMIZER: adam_onecycle
107 | LR: 0.003
108 | WEIGHT_DECAY: 0.01
109 | MOMENTUM: 0.9
110 |
111 | MOMS: [0.95, 0.85]
112 | PCT_START: 0.4
113 | DIV_FACTOR: 10
114 | DECAY_STEP_LIST: [35, 45]
115 | LR_DECAY: 0.1
116 | LR_CLIP: 0.0000001
117 |
118 | LR_WARMUP: False
119 | WARMUP_EPOCH: 1
120 |
121 | GRAD_NORM_CLIP: 10
--------------------------------------------------------------------------------
/tools/demo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | from pathlib import Path
4 |
5 | import mayavi.mlab as mlab
6 | import numpy as np
7 | import torch
8 |
9 | from pcdet.config import cfg, cfg_from_yaml_file
10 | from pcdet.datasets import DatasetTemplate
11 | from pcdet.models import build_network, load_data_to_gpu
12 | from pcdet.utils import common_utils
13 | from visual_utils import visualize_utils as V
14 |
15 |
16 | class DemoDataset(DatasetTemplate):
17 | def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
18 | """
19 | Args:
20 |             root_path: root directory of the point cloud files
21 |             dataset_cfg: dataset config
22 |             class_names: list of class names
23 |             training: whether the dataset is in training mode
24 |             logger: logger
25 |             ext: extension of the point cloud files
26 | """
27 | super().__init__(
28 | dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
29 | )
30 | self.root_path = root_path
31 | self.ext = ext
32 |         # gather all point cloud files with the given extension under the root directory
33 | data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
34 |
35 |         data_file_list.sort()  # sort by file name
36 | self.sample_file_list = data_file_list
37 |
38 | def __len__(self):
39 | return len(self.sample_file_list)
40 |
41 | def __getitem__(self, index):
42 | if self.ext == '.bin':
43 |             points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)  # load the raw points
44 | elif self.ext == '.npy':
45 | points = np.load(self.sample_file_list[index])
46 | else:
47 | raise NotImplementedError
48 |
49 |         # assemble the input dict
50 | input_dict = {
51 | 'points': points,
52 | 'frame_id': index,
53 | }
54 |
55 |         data_dict = self.prepare_data(data_dict=input_dict)  # data preprocessing
56 | return data_dict
57 |
58 |
59 | def parse_config():
60 | """
61 |     Parse the command line arguments:
62 | --cfg_file
63 | --data_path
64 | --ckpt
65 | --ext
66 | """
67 | parser = argparse.ArgumentParser(description='arg parser')
68 | parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
69 | help='specify the config for demo')
70 | parser.add_argument('--data_path', type=str, default='demo_data',
71 | help='specify the point cloud data file or directory')
72 | parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
73 | parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')
74 |
75 | args = parser.parse_args()
76 |
77 |     cfg_from_yaml_file(args.cfg_file, cfg)  # load the config file
78 |
79 | return args, cfg
80 |
81 |
82 | def main():
83 |     args, cfg = parse_config()  # parse the command line arguments and the config file
84 |     logger = common_utils.create_logger()  # create the logger
85 | logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
86 |     # build the dataset
87 | demo_dataset = DemoDataset(
88 | dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
89 | root_path=Path(args.data_path), ext=args.ext, logger=logger
90 | )
91 | logger.info(f'Total number of samples: \t{len(demo_dataset)}')
92 |     # build the network
93 | model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
94 |     # load the checkpoint
95 | model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
96 |     model.cuda()  # move the model to the GPU
97 |     model.eval()  # switch to evaluation mode
98 |     with torch.no_grad():  # run inference without gradients
99 | for idx, data_dict in enumerate(demo_dataset):
100 | logger.info(f'Visualized sample index: \t{idx + 1}')
101 | data_dict = demo_dataset.collate_batch([data_dict])
102 |             load_data_to_gpu(data_dict)  # move the batch to the GPU
103 |             pred_dicts, _ = model.forward(data_dict)  # model forward pass
104 |             # draw the detection results
105 | V.draw_scenes(
106 | points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
107 | ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
108 | )
109 |             # show the visualization
110 | mlab.show(stop=True)
111 |
112 | logger.info('Demo done.')
113 |
114 |
115 | if __name__ == '__main__':
116 | main()
117 |
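A typical invocation, run from the tools/ directory (the checkpoint path is a placeholder; any directory of KITTI-format .bin files works for --data_path):

python demo.py --cfg_file cfgs/kitti_models/second.yaml --ckpt ${CHECKPOINT_FILE} --data_path demo_data

For each sample the script builds a one-frame batch with collate_batch, runs the detector, and opens a mayavi window; closing the window advances to the next frame.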
--------------------------------------------------------------------------------
/tools/train_utils/optimization/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import torch.nn as nn
4 | import torch.optim as optim
5 | import torch.optim.lr_scheduler as lr_sched
6 |
7 | from .fastai_optim import OptimWrapper
8 | from .learning_schedules_fastai import CosineWarmupLR, OneCycle
9 |
10 |
11 | def build_optimizer(model, optim_cfg):
12 |     # if the adam optimizer is chosen, use optim.Adam
13 | if optim_cfg.OPTIMIZER == 'adam':
14 | optimizer = optim.Adam(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
15 |     # if the sgd optimizer is chosen, use optim.SGD
16 | elif optim_cfg.OPTIMIZER == 'sgd':
17 | optimizer = optim.SGD(
18 | model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
19 | momentum=optim_cfg.MOMENTUM
20 | )
21 |     # if the adam_onecycle optimizer is chosen
22 | elif optim_cfg.OPTIMIZER == 'adam_onecycle':
23 |
24 | def children(m: nn.Module):
25 |             # collect the direct child modules of m into a list
26 | return list(m.children())
27 |
28 | def num_children(m: nn.Module) -> int:
29 |             # return the number of direct child modules
30 | return len(children(m))
31 |
32 |         # recursively flatten the model into a list of leaf layers
33 |         # (Linear, Conv2d, BatchNorm2d, ReLU, ConvTranspose2d, ...); e.g.
34 |         # a = [[1], [2], [3], [4], [5]]; sum(a, []) == [1, 2, 3, 4, 5]
35 |         flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
36 |         # chain the flattened leaf layers into a single sequential layer group
37 |         get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
38 |
39 |         optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))  # Adam factory with custom betas
40 | optimizer = OptimWrapper.create(
41 | optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
42 |         )  # wd: weight decay
43 | else:
44 | raise NotImplementedError
45 |
46 | return optimizer
47 |
48 |
49 | def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
50 |     """
51 |     Build the learning-rate scheduler: adam_onecycle (OneCycle), LambdaLR, or CosineWarmupLR for warm-up
52 |     Args:
53 |         optimizer: the optimizer
54 |         total_iters_each_epoch: number of iterations per epoch, e.g. 928
55 |         total_epochs: total number of epochs, e.g. 80
56 |         last_epoch: id of the last finished epoch
57 |         optim_cfg: optimization config
58 |     """
59 |     decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]  # [35, 45] --> [32480, 41760]
60 |     # custom learning-rate decay function
61 |     def lr_lbmd(cur_epoch):
62 |         # start from a decay factor of 1
63 |         cur_decay = 1
64 |         for decay_step in decay_steps:
65 |             # decay once for every boundary that has already been passed
66 |             if cur_epoch >= decay_step:
67 |                 # multiply in one decay factor
68 |                 cur_decay = cur_decay * optim_cfg.LR_DECAY  # LR_DECAY: 0.1
69 |         # keep the learning rate from becoming too small
70 |         return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)  # LR_CLIP: 0.0000001
71 |
72 | lr_warmup_scheduler = None
73 |     total_steps = total_iters_each_epoch * total_epochs  # e.g. 928 * 80 = 74240
74 |     # build the OneCycle scheduler for adam_onecycle
75 | if optim_cfg.OPTIMIZER == 'adam_onecycle':
76 | lr_scheduler = OneCycle(
77 | optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
78 |         )  # LR: 0.003, MOMS: [0.95, 0.85], DIV_FACTOR: 10, PCT_START: 0.4
79 | else:
80 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
81 |         # Warm-up: start with a very small learning rate so the network eases into the data,
82 |         # then ramp up to the configured initial LR before decaying: rise, plateau, fall.
83 |         if optim_cfg.LR_WARMUP:
84 |             lr_warmup_scheduler = CosineWarmupLR(
85 |                 optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
86 |                 eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
87 |             )  # WARMUP_EPOCH: 1
88 |
89 | return lr_scheduler, lr_warmup_scheduler
90 |
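Putting the adam_onecycle numbers from the configs above together (illustrative arithmetic only):

# With LR=0.003, DIV_FACTOR=10 and PCT_START=0.4, OneCycle warms the learning
# rate up from 0.003 / 10 = 0.0003 to 0.003 over the first 40% of the steps,
# then anneals it back down while momentum moves between MOMS = [0.95, 0.85].
total_steps = 928 * 80  # iters/epoch * epochs, matching the 74240 noted above
print(0.003 / 10, int(0.4 * total_steps))  # 0.0003 29696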
--------------------------------------------------------------------------------