├── .gitattributes ├── Pictures ├── Bounding Box.jpg ├── Clustering.jpg ├── Downsampling.jpg ├── Downsampling1.jpg ├── Formula .png ├── RANSAC.jpg ├── RANSAC1.png ├── RANSAC2.png ├── RANSAC3.png ├── description.png └── downsampling2.jpg ├── Project.ipynb ├── README.md ├── RSN_Final_Project_Report.pdf └── velodyne_decoder-master ├── .clang-format ├── .github └── workflows │ ├── build.yml │ └── publish.yml ├── .gitignore ├── CHANGELOG.md ├── CMakeLists.txt ├── LICENSE ├── README.md ├── include └── velodyne_decoder │ ├── calibration.h │ ├── config.h │ ├── packet_decoder.h │ ├── pointcloud_aggregator.h │ ├── scan_decoder.h │ ├── stream_decoder.h │ ├── time_conversion.h │ └── types.h ├── setup.cfg ├── setup.py ├── src ├── calibration.cpp ├── packet_decoder.cpp ├── python.cpp ├── scan_decoder.cpp └── stream_decoder.cpp ├── test ├── conftest.py └── test_velodyne_decoder.py └── velodyne_decoder ├── __init__.py └── calibrations ├── Alpha Prime.yml ├── HDL-32E.yml ├── HDL-64E.yml ├── Puck Hi-Res.yml ├── Puck LITE.yml ├── VLP-16.yml ├── VLP-32A.yml ├── VLP-32B.yml ├── VLP-32C.yml └── __init__.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /Pictures/Bounding Box.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/Bounding Box.jpg -------------------------------------------------------------------------------- /Pictures/Clustering.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/Clustering.jpg -------------------------------------------------------------------------------- /Pictures/Downsampling.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/Downsampling.jpg -------------------------------------------------------------------------------- /Pictures/Downsampling1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/Downsampling1.jpg -------------------------------------------------------------------------------- /Pictures/Formula .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/Formula .png -------------------------------------------------------------------------------- /Pictures/RANSAC.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/RANSAC.jpg -------------------------------------------------------------------------------- /Pictures/RANSAC1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/RANSAC1.png -------------------------------------------------------------------------------- /Pictures/RANSAC2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/RANSAC2.png -------------------------------------------------------------------------------- /Pictures/RANSAC3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/RANSAC3.png -------------------------------------------------------------------------------- /Pictures/description.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/description.png -------------------------------------------------------------------------------- /Pictures/downsampling2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/Pictures/downsampling2.jpg -------------------------------------------------------------------------------- /Project.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 11, 6 | "id": "014db83f-1aff-49fa-ba4f-69d348240ac5", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "from bagpy import bagreader\n", 11 | "import rospy\n", 12 | "import std_msgs.msg\n", 13 | "import sensor_msgs.point_cloud2 as pc2" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 74, 19 | "id": "c75fc76b-d651-4e01-934e-0f200bc143ad", 20 | "metadata": { 21 | "tags": [] 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "import pandas as pd\n", 26 | "import numpy as np\n", 27 | "import velodyne_decoder as vd\n", 28 | "import open3d as o3d\n", 29 | "from tqdm.auto import tqdm\n", 30 | "import matplotlib.pyplot as plt\n", 31 | "import time\n", 32 | "import pandas as pd" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 12, 38 | "id": "f5a92ffc-eda6-4b28-9f76-c516f6f201ed", 39 | "metadata": { 40 | "tags": [] 41 | }, 42 | "outputs": [ 43 | { 44 | "data": { 45 | "application/vnd.jupyter.widget-view+json": { 46 | "model_id": "866b2399c06b49e3a58f5e0b026654c7", 47 | "version_major": 2, 48 | "version_minor": 0 49 | }, 50 | "text/plain": [ 51 | "0it [00:00, ?it/s]" 52 | ] 53 | }, 54 | "metadata": {}, 55 | "output_type": "display_data" 56 | } 57 | ], 58 | "source": [ 59 | "config = vd.Config(model='VLP-16') \n", 60 | "bagfile = '/home/german/Desktop/Project/2017-10-18-17-33-13_0.bag'\n", 61 | "lidar_topics = ['/ns1/velodyne_packets'] # '/ns2/velodyne_packets'\n", 62 | "cloud_arrays = []\n", 63 | "for stamp, points, topic in tqdm(vd.read_bag(bagfile, config, lidar_topics)):\n", 64 | " cloud_arrays.append(points)" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 13, 70 | "id": "b9bbd884-0f12-40ee-8952-8f808126c3ed", 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | 
"point_clouds = list(map(lambda x: x[:, :3], cloud_arrays))" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 14, 80 | "id": "abd7c5d3-7b4a-4817-a935-7fe8a924e0ac", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "def down_segment_cluster(pcd, voxel_size, distance_threshold, ransac_n, num_iterations, eps, min_points):\n", 85 | " \n", 86 | " # Segmentation of the road and objects\n", 87 | " _, inliers = pcd.segment_plane(distance_threshold=distance_threshold, ransac_n=ransac_n, num_iterations=num_iterations)\n", 88 | " inlier_cloud = pcd.select_by_index(inliers)\n", 89 | " pcd = pcd.select_by_index(inliers, invert=True)\n", 90 | " inlier_cloud.paint_uniform_color([1,0,0])\n", 91 | " pcd.paint_uniform_color([0,0,1])\n", 92 | "\n", 93 | " # Voxel downsample to remove uneccessary points\n", 94 | " pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size)\n", 95 | " \n", 96 | " # Clustering and Labeling\n", 97 | " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n", 98 | " labels = np.array(pcd_down.cluster_dbscan(eps=eps, min_points=min_points, print_progress=False))\n", 99 | " max_label = labels.max()\n", 100 | " print(f\"point cloud has {max_label + 1} clusters\")\n", 101 | " colors = plt.get_cmap(\"tab20\")(labels / (max_label if max_label > 0 else 1))\n", 102 | " colors[labels < 0] = 0\n", 103 | " pcd_down.colors = o3d.utility.Vector3dVector(colors[:, :3])\n", 104 | " \n", 105 | " return labels, pcd_down, pcd" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 19, 111 | "id": "6d566a7f-8656-4829-a541-68268ce95cd6", 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# o3d.visualization.draw_geometries([pcd])" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 80, 121 | "id": "8e472e55-d9d4-4a89-9dfb-1242e7bc0efe", 122 | "metadata": { 123 | "tags": [] 124 | }, 125 | "outputs": [], 126 | "source": [ 127 | "params = {'voxel_size': 0.3,\n", 128 | " 'distance_threshold': 0.2, \n", 129 | " 'ransac_n': 3, \n", 130 | " 'num_iterations': 500, \n", 131 | " 'eps': 0.9, \n", 132 | " 'min_points': 10}\n", 133 | "\n", 134 | "shorter_frames = list()\n", 135 | "for i in tqdm(range(0, 3331, 30)):\n", 136 | " # Create Point Clouds\n", 137 | " pcd = o3d.geometry.PointCloud()\n", 138 | " pcd.points = o3d.utility.Vector3dVector(point_clouds[i])\n", 139 | " # Create Clusters\n", 140 | " labels, pcd_out, pcd = down_segment_cluster(pcd, **params)\n", 141 | " # Append labels and cloud to list\n", 142 | " shorter_frames.append([labels, pcd_out, pcd])\n", 143 | " # break" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 21, 149 | "id": "f065ca83-5ede-4f5f-869e-65e4497f1e48", 150 | "metadata": {}, 151 | "outputs": [], 152 | "source": [ 153 | "k = 0\n", 154 | "labels = shorter_frames[k][0]\n", 155 | "cloud_clusters = shorter_frames[k][1]\n", 156 | "original_cloud = shorter_frames[k][2]" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": 22, 162 | "id": "d7aa8011-5171-4bcd-94c5-48a81e4a32a2", 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "o3d.visualization.draw_geometries([cloud_clusters])" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 23, 172 | "id": "a442a731-a6a6-45e2-a417-7dde825a1d32", 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "def get_bounding_boxes(labels, pcd):\n", 177 | " \n", 178 | " \"\"\"\n", 179 | " Get the bounding boxes given the labels and 
the point cloud\n", 180 | " \n", 181 | " \"\"\"\n", 182 | " \n", 183 | " obbs = []\n", 184 | " indexes = pd.Series(range(len(labels))).groupby(labels, sort=False).apply(list).tolist()\n", 185 | "\n", 186 | " Max_Points = 1000\n", 187 | " Min_Points = 10\n", 188 | "\n", 189 | " for i in range(0, len(indexes)):\n", 190 | " nb_pts = len(pcd.select_by_index(indexes[i]).points)\n", 191 | " if (nb_pts > Min_Points and nb_pts < Max_Points):\n", 192 | " sub_cloud = pcd.select_by_index(indexes[i])\n", 193 | " obb = sub_cloud.get_axis_aligned_bounding_box()\n", 194 | " obb.color=(0,0,1)\n", 195 | " obbs.append(obb)\n", 196 | " \n", 197 | " return obbs" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 24, 203 | "id": "cc4e0aae-3d78-4394-bd40-10731757af2e", 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "bbxs = get_bounding_boxes(labels = labels, pcd = cloud_clusters)\n", 208 | "o3d.visualization.draw_geometries([cloud_clusters] + bbxs)" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 14, 214 | "id": "2b30e695-0fa4-46cf-93ef-4dfc29f374a7", 215 | "metadata": {}, 216 | "outputs": [ 217 | { 218 | "data": { 219 | "text/plain": [ 220 | "86" 221 | ] 222 | }, 223 | "execution_count": 14, 224 | "metadata": {}, 225 | "output_type": "execute_result" 226 | } 227 | ], 228 | "source": [ 229 | "max_clusters = np.array(list(map(lambda x: x[0].max()+1, shorter_frames))).max()\n", 230 | "max_clusters" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 25, 236 | "id": "b7fc2d17-91ee-4e95-814a-b6ce18bb3bca", 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "def hard_code_add_geometry(vis, bbxs):\n", 241 | " \n", 242 | " lines = [[0, 1], [1, 2], [2, 3], [0, 3], \n", 243 | " [4, 5], [5, 6], [6, 7], [4, 7],\n", 244 | " [0, 4], [1, 5], [2, 6], [3, 7]]\n", 245 | " \n", 246 | " for c in range(max_clusters):\n", 247 | " \n", 248 | " line_set = o3d.geometry.LineSet()\n", 249 | " line_set.lines = o3d.utility.Vector2iVector(lines)\n", 250 | " \n", 251 | " if c > len(bbxs) - 1:\n", 252 | " vis.add_geometry(line_set)\n", 253 | " \n", 254 | " else:\n", 255 | " line_set.points = bbxs[c].get_box_points()\n", 256 | " vis.add_geometry(line_set)\n", 257 | " return vis\n", 258 | "\n", 259 | "def hard_code_add_geometry2(vis, bbxs):\n", 260 | " \n", 261 | " lines = [[0, 1], [1, 2], [2, 3], [0, 3], \n", 262 | " [4, 5], [5, 6], [6, 7], [4, 7],\n", 263 | " [0, 4], [1, 5], [2, 6], [3, 7]]\n", 264 | " \n", 265 | " for c in range(max_clusters):\n", 266 | " \n", 267 | " vis.clear_geometries()\n", 268 | " \n", 269 | " line_set = o3d.geometry.LineSet()\n", 270 | " line_set.lines = o3d.utility.Vector2iVector(lines)\n", 271 | " \n", 272 | " if c > len(bbxs) - 1:\n", 273 | " # vis.add_geometry(line_set)\n", 274 | " pass\n", 275 | " \n", 276 | " else:\n", 277 | " line_set.points = bbxs[c].get_box_points()\n", 278 | " vis.add_geometry(line_set)\n", 279 | " return vis\n", 280 | "\n", 281 | "\n", 282 | "def hard_code_update_geometry(vis, new_bbxs):\n", 283 | " \n", 284 | " lines = [[0, 1], [1, 2], [2, 3], [0, 3], \n", 285 | " [4, 5], [5, 6], [6, 7], [4, 7],\n", 286 | " [0, 4], [1, 5], [2, 6], [3, 7]]\n", 287 | " \n", 288 | " for c in range(max_clusters):\n", 289 | " \n", 290 | " line_set = o3d.geometry.LineSet()\n", 291 | " line_set.lines = o3d.utility.Vector2iVector(lines)\n", 292 | " \n", 293 | " if c > len(new_bbxs) - 1:\n", 294 | " line_set = line_set.clear()\n", 295 | " vis.update_geometry(line_set)\n", 296 | " \n", 297 | " 
else:\n", 298 | " line_set = line_set.clear()\n", 299 | " line_set.points = new_bbxs[c].get_box_points()\n", 300 | " vis.update_geometry(line_set)\n", 301 | " \n", 302 | " return vis" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": 26, 308 | "id": "3dbbb5cc-641b-4464-a4ea-7b6721b0b9da", 309 | "metadata": {}, 310 | "outputs": [], 311 | "source": [ 312 | "lines = [[0, 1], [1, 2], [2, 3], [0, 3], \n", 313 | " [4, 5], [5, 6], [6, 7], [4, 7],\n", 314 | " [0, 4], [1, 5], [2, 6], [3, 7]]" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 29, 320 | "id": "1d67a110-ffba-4e3f-a0de-c14f8a9fdcb6", 321 | "metadata": { 322 | "tags": [] 323 | }, 324 | "outputs": [], 325 | "source": [ 326 | "vis = o3d.visualization.Visualizer()\n", 327 | "vis.create_window()\n", 328 | "\n", 329 | "source = o3d.geometry.PointCloud()\n", 330 | "source.points = shorter_frames[0][1].points\n", 331 | "source.colors = shorter_frames[0][1].colors\n", 332 | "vis.add_geometry(source)\n", 333 | "\n", 334 | "bbxs = get_bounding_boxes(shorter_frames[0][0], shorter_frames[0][1])\n", 335 | "\n", 336 | "# vis = hard_code_add_geometry(vis, bbxs)\n", 337 | "\n", 338 | "# line_set = o3d.geometry.LineSet()\n", 339 | "# line_set.lines = o3d.utility.Vector2iVector(lines)\n", 340 | "# line_set.points = bbxs[10].get_box_points()\n", 341 | "# vis.add_geometry(line_set)\n", 342 | "\n", 343 | "# line_set = o3d.geometry.LineSet()\n", 344 | "# line_set = line_set.create_from_axis_aligned_bounding_box(bbxs[10])\n", 345 | "# vis.add_geometry(line_set)\n", 346 | "\n", 347 | "\n", 348 | "try:\n", 349 | " for frame_count in range(len(shorter_frames)):\n", 350 | " \n", 351 | " source.points = shorter_frames[frame_count][1].points\n", 352 | " source.colors = shorter_frames[frame_count][1].colors\n", 353 | " vis.update_geometry(source)\n", 354 | " \n", 355 | " new_bbxs = get_bounding_boxes(shorter_frames[frame_count][0], shorter_frames[frame_count][1])\n", 356 | " # vis = hard_code_add_geometry2(vis, bbxs)\n", 357 | " # line_set = line_set.create_from_axis_aligned_bounding_box(new_bbxs[10])\n", 358 | " \n", 359 | " # line_set.lines = o3d.utility.Vector2iVector(lines)\n", 360 | " # line_set.points = new_bbxs[10].get_box_points()\n", 361 | " # vis.update_geometry(line_set)\n", 362 | " \n", 363 | " # vis = hard_code_update_geometry(vis, new_bbxs)\n", 364 | " \n", 365 | " vis.poll_events()\n", 366 | " vis.update_renderer()\n", 367 | "\n", 368 | " # vis.capture_screen_image(\"D:\\\\EECE 5554\\\\Project\\\\Imgs\\\\temp_%05d.jpg\" % frame_count)\n", 369 | "finally:\n", 370 | " vis.destroy_window()" 371 | ] 372 | }, 373 | { 374 | "cell_type": "markdown", 375 | "id": "8158f72b-4e77-455a-b77a-11ef7af611f7", 376 | "metadata": { 377 | "tags": [] 378 | }, 379 | "source": [ 380 | "# Open3d ML" 381 | ] 382 | }, 383 | { 384 | "cell_type": "code", 385 | "execution_count": 65, 386 | "id": "5b9d1051-176d-4b4c-bc06-c06ca77a3e16", 387 | "metadata": {}, 388 | "outputs": [], 389 | "source": [ 390 | "import tensorflow as tf\n", 391 | "import open3d.ml.tf as ml3d\n", 392 | "# import open3d.ml.torch as ml3d" 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 56, 398 | "id": "99b097e4-46a0-43c0-8a0c-ce77ec495cfa", 399 | "metadata": {}, 400 | "outputs": [], 401 | "source": [ 402 | "import os\n", 403 | "import open3d.ml as _ml3d" 404 | ] 405 | }, 406 | { 407 | "cell_type": "code", 408 | "execution_count": 49, 409 | "id": "459555e4-4131-4239-b516-498be6e8e274", 410 | "metadata": {}, 411 | "outputs": [], 
412 | "source": [ 413 | "def prepare_point_cloud_for_inference(pcd):\n", 414 | " \n", 415 | " # Remove NaNs and infinity values\n", 416 | " pcd.remove_non_finite_points()\n", 417 | " \n", 418 | " # Extract the xyz points\n", 419 | " xyz = np.asarray(pcd.points)\n", 420 | " \n", 421 | " # Set the points to the correct format for inference\n", 422 | " data = {\"point\":xyz, 'feat': None, \n", 423 | " 'label':np.zeros((len(xyz),), dtype=np.int32)}\n", 424 | " \n", 425 | " return data, pcd" 426 | ] 427 | }, 428 | { 429 | "cell_type": "code", 430 | "execution_count": 33, 431 | "id": "86bc7aca-19c3-4ad3-a7a3-ed42990f767d", 432 | "metadata": {}, 433 | "outputs": [], 434 | "source": [ 435 | "data, pcd = prepare_point_cloud_for_inference(cloud_clusters)" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": 76, 441 | "id": "4e3029a5-c367-4697-8516-7bed8a31a8df", 442 | "metadata": { 443 | "tags": [] 444 | }, 445 | "outputs": [], 446 | "source": [ 447 | "cfg_file = \"Open3D-ML/ml3d/configs/pointpillars_kitti.yml\" ################################ FOLDER FROM GITHUB REPOSITORY ######################################################\n", 448 | "cfg = _ml3d.utils.Config.load_from_file(cfg_file)\n", 449 | "model = ml3d.models.PointPillars(**cfg.model)\n", 450 | "pipeline = ml3d.pipelines.ObjectDetection(model, **cfg.pipeline)" 451 | ] 452 | }, 453 | { 454 | "cell_type": "code", 455 | "execution_count": 54, 456 | "id": "a78cdd8d-0ca1-4f76-a09d-478127d447ab", 457 | "metadata": { 458 | "tags": [] 459 | }, 460 | "outputs": [], 461 | "source": [ 462 | "# load the parameters (1) ###################################### IF THIS DOESN'T WORK, TRY (2)\n", 463 | "\n", 464 | "ckpt_folder = \"./logs/\"\n", 465 | "os.makedirs(ckpt_folder, exist_ok=True)\n", 466 | "ckpt_path = ckpt_folder + \"pointpillars_kitti_202012221652utc.pth\"\n", 467 | "pointpillar_url = \"https://storage.googleapis.com/open3d-releases/model-zoo/pointpillars_kitti_202012221652utc.pth\"\n", 468 | "if not os.path.exists(ckpt_path):\n", 469 | " cmd = \"wget {} -O {}\".format(pointpillar_url, ckpt_path)\n", 470 | " os.system(cmd)\n", 471 | "pipeline.load_ckpt(ckpt_path)" 472 | ] 473 | }, 474 | { 475 | "cell_type": "code", 476 | "execution_count": 78, 477 | "id": "d92bde14-2ce4-49e0-89fb-754a260f939a", 478 | "metadata": { 479 | "tags": [] 480 | }, 481 | "outputs": [ 482 | { 483 | "data": { 484 | "text/plain": [ 485 | "0" 486 | ] 487 | }, 488 | "execution_count": 78, 489 | "metadata": {}, 490 | "output_type": "execute_result" 491 | } 492 | ], 493 | "source": [ 494 | "# load the parameters (2)\n", 495 | "pipeline.load_ckpt(ckpt_path='/home/german/Desktop/Project/pointpillars_kitti_202012221652utc/ckpt-12.index') ################################ FOLDER FROM DOWNLOADED WEIGHTS ######################################################" 496 | ] 497 | }, 498 | { 499 | "cell_type": "code", 500 | "execution_count": 62, 501 | "id": "2262dd26-2d37-4edf-b9e4-2d7550384f8c", 502 | "metadata": {}, 503 | "outputs": [ 504 | { 505 | "data": { 506 | "text/plain": [ 507 | "{'point': array([[ 32.37499619, -11.50275421, 5.44173002],\n", 508 | " [ 32.38158798, -11.4732132 , 9.20515823],\n", 509 | " [ 32.72634125, -11.4862566 , 4.25860262],\n", 510 | " ...,\n", 511 | " [ 11.91393216, 18.31762505, -0.38141652],\n", 512 | " [ 14.94410515, 19.95416514, 6.67994722],\n", 513 | " [ 14.08106613, 18.19886716, 1.20592519]]),\n", 514 | " 'feat': None,\n", 515 | " 'label': array([0, 0, 0, ..., 0, 0, 0], dtype=int32)}" 516 | ] 517 | }, 518 | "execution_count": 62, 519 
| "metadata": {}, 520 | "output_type": "execute_result" 521 | } 522 | ], 523 | "source": [ 524 | "data" 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": 79, 530 | "id": "801104ca-d146-4108-9aa0-11f4be272f46", 531 | "metadata": {}, 532 | "outputs": [ 533 | { 534 | "ename": "KeyError", 535 | "evalue": "'Exception encountered when calling layer \"point_pillars_4\" (type PointPillars).\\n\\n0\\n\\nCall arguments received:\\n • inputs={\\'point\\': \\'tf.Tensor(shape=(5885, 3), dtype=float32)\\', \\'feat\\': \\'None\\', \\'label\\': \\'tf.Tensor(shape=(5885,), dtype=int32)\\'}\\n • training=False'", 536 | "output_type": "error", 537 | "traceback": [ 538 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 539 | "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", 540 | "\u001b[0;32m/tmp/ipykernel_39566/3584476445.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpipeline\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_inference\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 541 | "\u001b[0;32m~/anaconda3/envs/o3d/lib/python3.7/site-packages/open3d/_ml3d/tf/pipelines/object_detection.py\u001b[0m in \u001b[0;36mrun_inference\u001b[0;34m(self, data)\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 52\u001b[0;31m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 53\u001b[0m \u001b[0mboxes\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minference_end\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresults\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 542 | "\u001b[0;32m~/anaconda3/envs/o3d/lib/python3.7/site-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_tb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 68\u001b[0m 
\u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0mfiltered_tb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 543 | "\u001b[0;32m~/anaconda3/envs/o3d/lib/python3.7/site-packages/open3d/_ml3d/tf/models/point_pillars.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0mparam\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mtoggle\u001b[0m \u001b[0mtraining\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 141\u001b[0m \"\"\"\n\u001b[0;32m--> 142\u001b[0;31m \u001b[0minputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0munpack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 143\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextract_feats\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbbox_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 544 | "\u001b[0;31mKeyError\u001b[0m: 'Exception encountered when calling layer \"point_pillars_4\" (type PointPillars).\\n\\n0\\n\\nCall arguments received:\\n • inputs={\\'point\\': \\'tf.Tensor(shape=(5885, 3), dtype=float32)\\', \\'feat\\': \\'None\\', \\'label\\': \\'tf.Tensor(shape=(5885,), dtype=int32)\\'}\\n • training=False'" 545 | ] 546 | } 547 | ], 548 | "source": [ 549 | "result = pipeline.run_inference(data)" 550 | ] 551 | }, 552 | { 553 | "cell_type": "markdown", 554 | "id": "3b68a435-4a92-45d9-a4ac-39a83c60bf8c", 555 | "metadata": { 556 | "jp-MarkdownHeadingCollapsed": true, 557 | "tags": [] 558 | }, 559 | "source": [ 560 | "# Create Video" 561 | ] 562 | }, 563 | { 564 | "cell_type": "code", 565 | "execution_count": null, 566 | "id": "fc13ca0b-c1bd-4968-9dc6-6dbe47313f4b", 567 | "metadata": {}, 568 | "outputs": [], 569 | "source": [ 570 | "import glob\n", 571 | "import cv2\n", 572 | "import os" 573 | ] 574 | }, 575 | { 576 | "cell_type": "code", 577 | "execution_count": null, 578 | "id": "983a7752-2d82-41e9-b068-3ac00e08b99f", 579 | "metadata": {}, 580 | "outputs": [], 581 | "source": [ 582 | "size = (1920, 1055)\n", 583 | "out = cv2.VideoWriter(os.path.join(\"D:\\\\EECE 5554\\\\Project\\\\\", 'Video.mp4'), cv2.VideoWriter_fourcc(*'DIVX'), 30, size)\n", 584 | "\n", 585 | "for pcd_img in tqdm(sorted(glob.glob(\"D:\\\\EECE 5554\\\\Project\\\\Imgs\\\\*\"))):\n", 586 | " pcd_img = cv2.imread(pcd_img)\n", 587 | " out.write(pcd_img)\n", 588 | "out.release()" 589 | ] 590 | } 591 | ], 592 | "metadata": { 593 | "kernelspec": { 594 | "display_name": "Python 3 (ipykernel)", 595 | "language": "python", 596 | "name": "python3" 597 | }, 598 | "language_info": { 599 | 
"codemirror_mode": { 600 | "name": "ipython", 601 | "version": 3 602 | }, 603 | "file_extension": ".py", 604 | "mimetype": "text/x-python", 605 | "name": "python", 606 | "nbconvert_exporter": "python", 607 | "pygments_lexer": "ipython3", 608 | "version": "3.7.15" 609 | } 610 | }, 611 | "nbformat": 4, 612 | "nbformat_minor": 5 613 | } 614 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LiDAR point-cloud based 3D object detection 2 | Object detection is a key component in advanced driver assistance systems (ADAS), which allow cars to detect driving lanes and pedestrians to improve road safety. This report describes a modern approach for 3D Object Detection using LiDAR while driving on the road. The system includes a Velodyne VLP-16 LiDAR sensor to capture real-time scenarios. Using Open3d, we perform the following: segmentation, RANSAC, DBSCAN, Voxel-Grid Downsampling, clustering, and detection using bounding boxes. The experimental results confirm the detection of objects such as pedestrians, cyclists, and other vehicles. 3 | 4 | To get the LiDAR_pedestrian_bicyclist_cars dataset ROSbag file visit this link: 5 | - https://northeastern-my.sharepoint.com/:f:/r/personal/singh_risha_northeastern_edu/Documents/LiDAR%20bicycle_Pedestrian%20data?csf=1&web=1&e=rjat17 6 | 7 | # Method 8 | LiDAR is an acronym for Light Detection And Ranging. Eye-safe laser beams are used to create a 3D representation of the environment. The LiDAR sensor emits pulsed light waves from a laser into the surrounding area. These pulses bounce off surrounding objects and return to the sensor. The sensor uses the time it took for each pulse to return to the sensor to calculate the distance it traveled. Repeating this process multiple times per second creates a real-time 3D point cloud map of the environment. 9 | 10 | The dataset are available here. The dataset was collected using Velodyne’s VLP-16 LiDAR model. Each packet contains the data from 24 firing sequences in 12 data blocks where each data block contains information from two firing sequences of 16 lasers. 11 | 12 | # Principle behind Algorithms 13 | # 1. Voxel-Grid Down-sampling 14 | After extraction, we obtain a large number of points to process which we wish to reduce for computational efficiency. However, we also wish to retain the data’s key features. Because of this, we use Voxel grids which are 3D boxes or cells that can hold multiple points. The points present in each voxel (i.e., 3D box), are approximated with their centroid. This represents an accurate structure of the surface by points that are equidistant from each other. While retaining the essential features of the data set, we reduced the number of points seen in each frame to around 6,500 from the original 23,000 data points. 15 | # 2. RANSAC: RANdom Sampling and Consensus 16 | The core RANSAC algorithm is fairly straightforward: 17 | 1. Select a random set of points (3 points to form a plane). 18 | 2. Calculate the parameters required for the plane equation. 19 | 3. Calculate the distance of all the points in the point cloud from the plane. 20 | 4. If the distance is within the THRESHOLD then add the point as an inlier. 21 | 5. Store the plane points and points having the maximum number of inliers. 22 | 6. Repeat the process again for MAX-ITERATIONS. 
23 | 24 | 25 | 26 | 27 | After RANSAC is complete, the plane having the maximum number of inliers is the best estimate of the ground plane (i.e., the road plane). Using this model-based algorithm, the ground plane can be eliminated, so the obstacles in the sensor's field of view can be localized and detected more efficiently. 28 | 29 | # 3. DBSCAN: Density-Based Spatial Clustering of Applications with Noise 30 | DBSCAN clusters points based on local density and treats sparse points as noise. The algorithm can 31 | be summarized as follows: 32 | 1. Pick a point in the dataset (until all points have been visited). 33 | 2. If there are at least MINPOINTS points within a radius of EPS of the selected point, consider all these points to be part of the same cluster. 34 | 3. Expand the clusters by repeating this calculation for each neighboring point. DBSCAN is a robust clustering mechanism because it treats noise as outliers; unlike k-means, it can detect clusters of arbitrary shape and does not require the number of clusters to be specified in advance. 35 | 36 | # 4. Bounding box Detection 37 | To draw bounding boxes around the clusters, we group the point indices by cluster label. For each cluster with a reasonable number of points (between 10 and 1,000 in our implementation), we then compute an axis-aligned bounding box from those points. 38 | 39 | # Analysis of approach 40 | To begin, we first visualize the data as point clouds. By extracting our data with velodyne_decoder and using Open3D for point cloud processing, we can see the resulting point clouds of a single frame in our data. To segment the road plane out of the data, we use the previously 41 | described RANSAC algorithm. The road plane, labeled in red, is well-defined and can be separated from the data to reduce the complexity and possible noise. 42 | 43 | Initial point clouds of a single frame | Result after segmentation | 44 | :-------------------------:|:------------------------:| 45 | | | | 46 | 47 | With only the outlier points, we still have around 23,000 points per frame; voxel downsampling reduces this to around 6,500 points. A before-and-after comparison is shown below. Despite reducing the point count by roughly a factor of 3.5, the contents of the point clouds are not affected: the clusters of points remain, only the number of points forming each cluster is reduced. 48 | 49 | Before running voxel downsampling | After running voxel downsampling | 50 | :-------------------------:|:------------------------:| 51 | | | | 52 | 53 | Finally, using DBSCAN, we cluster the remaining points into objects; the detected clusters are shown below. Given these clusters, we place bounding boxes around the detected objects. 54 | 55 | # Final Results 56 | We were able to detect the objects in real time. As seen from the final results, detection is not always perfectly accurate: the voxel down-sampling can discard points in dense regions, which may cause some loss of detail. Nevertheless, the current results show that the detection task can be achieved with satisfying accuracy. 57 | 58 | Results after clustering | Clusters with bounding boxes | 59 | :-------------------------:|:------------------------:| 60 | | | | 61 | 62 | # References 63 | 1. Velodyne lidar puck. https://www.amtechs.co.jp/product/VLP-16-Puck.pdf. 64 | 2. Downsampling a pointcloud using a voxelgrid filter.
https://adioshun.gitbooks.io/pcl/content/Tutorial/Filtering/pcl-cpp-downsampling-a-pointcloud-using-a-voxelgrid-filter.html, 2022. 65 | 3. Downsampling a pointcloud using a voxelgrid filter — point cloud library 0.0 documentation. https://pcl.readthedocs.io/en/latest/voxel_grid.html, 2022. 66 | 4. What is lidar? learn how lidar works, velodyne lidar. https://velodynelidar.com/what-is-lidar/, 2022. 67 | 5. Nagesh Chauhan. Dbscan clustering algorithm in machine learning - kdnuggets. https://www.kdnuggets.com/2020/04/dbscan-clustering-algorithm-machine-learning.html, 2022. 68 | 6. Martin Simon, Stefan Milz, Karl Amende, and Horst-Michael Gross.Complex-yolo: Real-time 3d object detection on point clouds. arXiv, 2018. 69 | 7. Martin Valgur. Velodyne decoder. https://github.com/valgur/velodyne_decoder, 2022. 70 | 8. Leah A. Wasser. The basics of lidar - light detection and ranging - remote sensing. https://www.neonscience.org/resources/learning-hub/tutorials/lidar-basics, 2022. 71 | 9. Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3D: A modern library for 3D data processing. arXiv:1801.09847, 2018. 72 | -------------------------------------------------------------------------------- /RSN_Final_Project_Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobuRishabh/LiDAR-point-cloud-based-3D-object-detection/091f5c00c60cb00a421ace0e3635da1e3567cb3d/RSN_Final_Project_Report.pdf -------------------------------------------------------------------------------- /velodyne_decoder-master/.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | ColumnLimit: 100 3 | TabWidth: 2 4 | IndentPPDirectives: AfterHash 5 | AlignConsecutiveAssignments: true 6 | -------------------------------------------------------------------------------- /velodyne_decoder-master/.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | on: 3 | push: 4 | paths-ignore: 5 | - '**.md' 6 | - 'LICENSE' 7 | - '.gitignore' 8 | - 'assets/**' 9 | pull_request: 10 | paths-ignore: 11 | - '**.md' 12 | - 'LICENSE' 13 | - '.gitignore' 14 | - 'assets/**' 15 | jobs: 16 | build: 17 | runs-on: ${{ matrix.os }} 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | os: [ ubuntu-latest, windows-latest, macos-latest ] 22 | python-version: [ '2.7', '3.6', '3.10' ] 23 | steps: 24 | - uses: actions/checkout@v2 25 | 26 | - name: Set up Python 27 | uses: actions/setup-python@v2 28 | with: 29 | python-version: ${{ matrix.python-version }} 30 | 31 | - name: Install cmake, wheel 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install --upgrade build wheel cmake setuptools 35 | 36 | - name: Build and install 37 | run: pip install . 
-v 38 | 39 | - name: Install Python 2 test dependencies 40 | if: matrix.python-version == '2.7' 41 | run: pip install "rospy<1.15" "rosbag<1.15" --extra-index-url https://rospypi.github.io/simple/ 42 | 43 | - name: Install test dependencies 44 | run: pip install .[tests] --extra-index-url https://rospypi.github.io/simple/ 45 | 46 | - name: Run tests 47 | run: pytest test --color=yes -v 48 | -------------------------------------------------------------------------------- /velodyne_decoder-master/.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | on: 3 | release: 4 | types: [ published ] 5 | workflow_dispatch: 6 | jobs: 7 | build_sdist: 8 | name: Build SDist 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | 13 | - name: Install dependencies 14 | run: | 15 | python -m pip install --upgrade pip 16 | python -m pip install --upgrade build twine 17 | 18 | - name: Build SDist 19 | run: python -m build --sdist 20 | 21 | - name: Check metadata 22 | run: python -m twine check dist/* 23 | 24 | - uses: actions/upload-artifact@v2 25 | with: 26 | path: dist/*.tar.gz 27 | 28 | build_wheels: 29 | name: Wheels on ${{ matrix.os }} 30 | runs-on: ${{ matrix.os }} 31 | strategy: 32 | fail-fast: false 33 | matrix: 34 | os: [ ubuntu-latest, windows-latest, macos-latest ] 35 | steps: 36 | - uses: actions/checkout@v2 37 | 38 | - uses: pypa/cibuildwheel@v2.3.1 39 | env: 40 | CIBW_BEFORE_BUILD: pip install --prefer-binary --upgrade cmake numpy 41 | CIBW_ARCHS_LINUX: x86_64 42 | CIBW_ARCHS_WINDOWS: auto64 43 | CIBW_ARCHS_MACOS: auto64 44 | # Disable building PyPy wheels on all platforms 45 | CIBW_SKIP: pp* 46 | 47 | - name: Verify clean directory 48 | run: git diff --exit-code 49 | shell: bash 50 | 51 | - name: Upload wheels 52 | uses: actions/upload-artifact@v2 53 | with: 54 | path: wheelhouse/*.whl 55 | 56 | upload_all: 57 | name: Upload if release 58 | needs: [ build_wheels, build_sdist ] 59 | runs-on: ubuntu-latest 60 | if: github.event_name == 'release' && github.event.action == 'published' 61 | steps: 62 | - uses: actions/setup-python@v2 63 | 64 | - uses: actions/download-artifact@v2 65 | with: 66 | name: artifact 67 | path: dist 68 | 69 | - uses: pypa/gh-action-pypi-publish@release/v1 70 | with: 71 | user: __token__ 72 | password: ${{ secrets.PYPI_API_TOKEN }} 73 | -------------------------------------------------------------------------------- /velodyne_decoder-master/.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Python template 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 100 | __pypackages__/ 101 | 102 | # Celery stuff 103 | celerybeat-schedule 104 | celerybeat.pid 105 | 106 | # SageMath parsed files 107 | *.sage.py 108 | 109 | # Environments 110 | .env 111 | .venv 112 | env/ 113 | venv/ 114 | ENV/ 115 | env.bak/ 116 | venv.bak/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | .dmypy.json 131 | dmypy.json 132 | 133 | # Pyre type checker 134 | .pyre/ 135 | 136 | # pytype static type analyzer 137 | .pytype/ 138 | 139 | # Cython debug symbols 140 | cython_debug/ 141 | 142 | ### Linux template 143 | *~ 144 | 145 | # temporary files which can be created if a process still has a handle open of a deleted file 146 | .fuse_hidden* 147 | 148 | # KDE directory preferences 149 | .directory 150 | 151 | # Linux trash folder which might appear on any partition or disk 152 | .Trash-* 153 | 154 | # .nfs files are created when an open file is removed but is still being accessed 155 | .nfs* 156 | 157 | ### C++ template 158 | # Prerequisites 159 | *.d 160 | 161 | # Compiled Object files 162 | *.slo 163 | *.lo 164 | *.o 165 | *.obj 166 | 167 | # Precompiled Headers 168 | *.gch 169 | *.pch 170 | 171 | # Compiled Dynamic libraries 172 | *.so 173 | *.dylib 174 | *.dll 175 | 176 | # Fortran module files 177 | *.mod 178 | *.smod 179 | 180 | # Compiled Static libraries 181 | *.lai 182 | *.la 183 | *.a 184 | *.lib 185 | 186 | # Executables 187 | *.exe 188 | *.out 189 | *.app 190 | 191 | ### ROS template 192 | devel/ 193 | logs/ 194 | build/ 195 | bin/ 196 | lib/ 197 | msg_gen/ 198 | srv_gen/ 199 | msg/*Action.msg 200 | msg/*ActionFeedback.msg 201 | msg/*ActionGoal.msg 202 | msg/*ActionResult.msg 203 | msg/*Feedback.msg 204 | msg/*Goal.msg 205 | msg/*Result.msg 206 | msg/_*.py 207 | build_isolated/ 208 | devel_isolated/ 209 | 210 | # Generated by dynamic reconfigure 211 | *.cfgc 212 | /cfg/cpp/ 213 | /cfg/*.py 214 | 215 | # Ignore generated docs 216 | *.dox 217 | *.wikidoc 218 | 219 | # 
eclipse stuff 220 | .project 221 | .cproject 222 | 223 | # qcreator stuff 224 | CMakeLists.txt.user 225 | 226 | srv/_*.py 227 | *.pcd 228 | *.pyc 229 | qtcreator-* 230 | *.user 231 | 232 | /planning/cfg 233 | /planning/docs 234 | /planning/src 235 | 236 | *~ 237 | 238 | # Emacs 239 | .#* 240 | 241 | # Catkin custom files 242 | CATKIN_IGNORE 243 | 244 | ### VisualStudioCode template 245 | .vscode/ 246 | 247 | # Local History for Visual Studio Code 248 | .history/ 249 | 250 | ### JetBrains template 251 | .idea/ 252 | cmake-build-* 253 | -------------------------------------------------------------------------------- /velodyne_decoder-master/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [2.3.0] – 2022-03-20 4 | 5 | ### Changed 6 | 7 | - `VLS-128` has been replaced by `Alpha Prime` as the model identifier, but is still accepted for backwards compatibility. (@flopie2009 #1) 8 | - Removed the broken VLS-128 calibration data file. (@flopie2009 #2) 9 | 10 | ## [2.2.0] – 2021-12-10 11 | 12 | ### Added 13 | 14 | - Added `time_range` parameter to `read_bag()` and `read_pcap()`. 15 | 16 | ### Fixed 17 | 18 | - `read_bag()` no longer closes opened `Bag` objects passed to it. 19 | 20 | ## [2.1.0] – 2021-12-08 21 | 22 | ### Added 23 | 24 | - `read_bag()` now optionally also includes `frame_id`-s from the ROS messages in the returned tuples 25 | when `return_frame_id=True` is set. 26 | 27 | ### Fixed 28 | 29 | - Fixed `Config("model")` constructor not working as expected due to `calibration_file` missing a default value. 30 | - Fixed out-of-scope pointer dereferencing in the calibration parser. 31 | - Fixed a build error on Windows due to yaml-cpp by pinning its version to 0.7. 32 | 33 | ## [2.0.0] – 2021-11-11 34 | 35 | ### Added 36 | 37 | - Support for PCAP file decoding with the `read_pcap()` function in Python and `StreamDecoder` in C++. 38 | - More convenient data extraction from ROS bags with `read_bag()`. 39 | - Standard calibration info provided by Velodyne is now used by default and `config.calibration_file` can be left unset 40 | in most cases. 41 | - `TIMINGS_AVAILABLE` was added for details about which models support point timing info. The current list is: 42 | `['HDL-32E', 'VLP-16', 'VLP-32C', 'VLS-128']`. 43 | 44 | ### Changed 45 | 46 | - The decoded scans are now returned as a contiguous float32 NumPy arrays instead of PCL-compatible structs by default. 47 | You can pass `as_pcl_structs=True` to the decoding functions for the old behavior. 48 | - Model ID naming scheme has been updated to better match the typical forms used by Velodyne.
49 | Before: `['VLP16', '32C', '32E', 'VLS128']`.
50 | After: `['HDL-32E', 'HDL-64E', 'HDL-64E_S2', 'HDL-64E_S3', 'VLP-16', 'VLP-32C', 'VLS-128']`. 51 | - Conversion to NumPy arrays is now done without any unnecessary data copying. 52 | 53 | ### Fixed 54 | 55 | - Fixed a Python 2 ROS message decoding error. 56 | 57 | ## [1.0.1] – 2021-05-19 58 | 59 | Initial release. 60 | 61 | [2.3.0]: https://github.com/valgur/velodyne_decoder/compare/v2.2.0...v2.3.0 62 | 63 | [2.2.0]: https://github.com/valgur/velodyne_decoder/compare/v2.1.0...v2.2.0 64 | 65 | [2.1.0]: https://github.com/valgur/velodyne_decoder/compare/v2.0.0...v2.1.0 66 | 67 | [2.0.0]: https://github.com/valgur/velodyne_decoder/compare/v1.0.1...v2.0.0 68 | 69 | [1.0.1]: https://github.com/valgur/velodyne_decoder/releases/tag/v1.0.1 70 | -------------------------------------------------------------------------------- /velodyne_decoder-master/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.14) 2 | project(velodyne_decoder CXX) 3 | 4 | set(CMAKE_CXX_STANDARD 17) 5 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | set(CMAKE_CXX_EXTENSIONS OFF) 7 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 8 | 9 | if(NOT CMAKE_BUILD_TYPE) 10 | message(STATUS "No build type selected, default to Release") 11 | set(CMAKE_BUILD_TYPE "Release") 12 | endif() 13 | 14 | if(MSVC) 15 | add_compile_options(/W4 /O2) 16 | else() 17 | add_compile_options( 18 | "$<$:-ggdb3;-Og>" 19 | "$<$:-ggdb3;-O3>" 20 | "$<$:-O3>" 21 | -Wall 22 | -Wno-sign-compare 23 | -Wextra 24 | # disabled since pedantic complains about 'ISO C++ prohibits anonymous structs' for PointXYZIRT 25 | # -Wpedantic 26 | ) 27 | endif() 28 | 29 | # version ranges are only available in CMake 3.19+ 30 | #find_package(yaml-cpp 0.6...<2 QUIET) 31 | find_package(yaml-cpp 0.6 QUIET) 32 | if(NOT yaml-cpp_FOUND) 33 | include(FetchContent) 34 | FetchContent_Declare(yaml-cpp 35 | GIT_REPOSITORY https://github.com/jbeder/yaml-cpp.git 36 | GIT_SHALLOW ON 37 | GIT_TAG yaml-cpp-0.7.0 38 | ) 39 | FetchContent_MakeAvailable(yaml-cpp) 40 | set(YAML_CPP_LIBRARIES yaml-cpp::yaml-cpp) 41 | endif() 42 | 43 | if(DEFINED VERSION_INFO) 44 | add_compile_definitions(VERSION_INFO=${VERSION_INFO}) 45 | endif() 46 | 47 | add_library(${PROJECT_NAME} STATIC 48 | src/calibration.cpp 49 | src/packet_decoder.cpp 50 | src/scan_decoder.cpp 51 | src/stream_decoder.cpp 52 | ) 53 | target_link_libraries(${PROJECT_NAME} PRIVATE ${YAML_CPP_LIBRARIES}) 54 | target_include_directories(${PROJECT_NAME} PUBLIC include PRIVATE SYSTEM ${YAML_CPP_INCLUDE_DIRS}) 55 | 56 | if(BUILD_PYTHON) 57 | #find_package(pybind11 2.6...<3 QUIET) 58 | find_package(pybind11 2.6 QUIET) 59 | if(NOT pybind11_FOUND) 60 | include(FetchContent) 61 | FetchContent_Declare(pybind11 62 | GIT_REPOSITORY https://github.com/pybind/pybind11.git 63 | GIT_TAG stable 64 | GIT_SHALLOW ON 65 | ) 66 | FetchContent_MakeAvailable(pybind11) 67 | endif() 68 | 69 | pybind11_add_module(${PROJECT_NAME}_pylib src/python.cpp) 70 | target_include_directories(${PROJECT_NAME}_pylib PRIVATE include) 71 | target_link_libraries(${PROJECT_NAME}_pylib PRIVATE ${PROJECT_NAME}) 72 | endif() 73 | -------------------------------------------------------------------------------- /velodyne_decoder-master/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, Martin Valgur 4 | Copyright (c) 2007-2021 Austin Robot Technology, and others 5 | All rights reserved. 
6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the copyright holder nor the names of its 18 | contributors may be used to endorse or promote products derived from 19 | this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /velodyne_decoder-master/README.md: -------------------------------------------------------------------------------- 1 | # velodyne_decoder [![PyPI](https://img.shields.io/pypi/v/velodyne-decoder)](https://pypi.org/project/velodyne-decoder/) [![Build](https://github.com/valgur/velodyne_decoder/actions/workflows/build.yml/badge.svg?event=push)](https://github.com/valgur/velodyne_decoder/actions/workflows/build.yml) [![PyPI - Downloads](https://img.shields.io/pypi/dm/velodyne-decoder)](https://pypistats.org/packages/velodyne-decoder) 2 | 3 | Python package and C++ library for Velodyne packet decoding. Point cloud extraction from PCAP and ROS bag files is 4 | supported out of the box. 5 | 6 | The decoded point clouds are provided either as a structured NumPy array: 7 | 8 | ```python 9 | array([(8.327308, -2.161341, 0.3599853, 85., 17, -0.04960084), 10 | (8.323784, -2.9578836, 0.27016047, 102., 15, -0.04959854), 11 | (8.184404, -2.845847, -0.8741639, 39., 2, -0.04959623), ..., 12 | (8.369528, -2.8161895, 2.307987, 17., 31, 0.00064051), 13 | (8.377898, -3.2570598, 1.7714221, 104., 30, 0.00064282), 14 | (8.358282, -2.8030438, 0.31229734, 104., 16, 0.00064282)], 15 | dtype={'names': ['x', 'y', 'z', 'intensity', 'ring', 'time'], 16 | 'formats': ['> > velodyne_decoder.Config.SUPPORTED_MODELS 98 | ['HDL-32E', 'HDL-64E', 'HDL-64E_S2', 'HDL-64E_S3', 'VLP-16', 'VLP-32C', 'Alpha Prime'] 99 | ``` 100 | 101 | Note that timing info is available for only a subset of the models: 102 | 103 | ```python 104 | >> > velodyne_decoder.Config.TIMINGS_AVAILABLE 105 | ['HDL-32E', 'VLP-16', 'VLP-32C', 'Alpha Prime'] 106 | ``` 107 | 108 | Other available options are: 109 | 110 | * `calibration_file` – the beam calibration details from Velodyne are used by default based on the model ID. 
If you 111 | however wish to use a more specific calibration, you can specify one in 112 | the [YAML format](https://wiki.ros.org/velodyne_pointcloud#gen_calibration.py) used by the ROS driver. 113 | * `min_range` and `max_range` – only return points between these range values. 114 | * `min_angle` and `max_angle` – only return points between these azimuth angles. 115 | 116 | Options only applicable to PCAP decoding: 117 | 118 | * `rpm` – the device rotation speed in revolutions per minute. 119 | * `gps_time` – use the timestamp from the packet's data if true, packet's arrival time otherwise (default). 120 | * `timestamp_first_packet` – whether the timestamps are set based on the first or last packet in the scan 121 | 122 | ## Authors 123 | 124 | * Martin Valgur ([@valgur](https://github.com/valgur)) 125 | 126 | The core functionality has been adapted from the ROS [velodyne driver](https://github.com/ros-drivers/velodyne). 127 | 128 | ## License 129 | 130 | [BSD 3-Clause License](LICENSE) 131 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/calibration.h: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2012, 2019 Austin Robot Technology, Piyush Khandelwal, Joshua Whitley 2 | // All rights reserved. 3 | // 4 | // Software License Agreement (BSD License 2.0) 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions 8 | // are met: 9 | // 10 | // * Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // * Redistributions in binary form must reproduce the above 13 | // copyright notice, this list of conditions and the following 14 | // disclaimer in the documentation and/or other materials provided 15 | // with the distribution. 16 | // * Neither the name of {copyright_holder} nor the names of its 17 | // contributors may be used to endorse or promote products derived 18 | // from this software without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 | // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 | // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 | // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 | // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 | // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 | // POSSIBILITY OF SUCH DAMAGE. 32 | 33 | #pragma once 34 | 35 | #include 36 | #include 37 | #include 38 | 39 | namespace velodyne_decoder { 40 | 41 | /** \brief correction values for a single laser 42 | * 43 | * Correction values for a single laser (as provided by db.xml from 44 | * Velodyne). Includes parameters for Velodyne HDL-64E S2.1. 45 | * 46 | * http://velodynelidar.com/lidar/products/manual/63-HDL64E%20S2%20Manual_Rev%20D_2011_web.pdf 47 | */ 48 | 49 | /** \brief Correction information for a single laser. 
*/ 50 | struct LaserCorrection { 51 | /** parameters in db.xml */ 52 | float rot_correction; 53 | float vert_correction; 54 | float dist_correction; 55 | bool two_pt_correction_available; 56 | float dist_correction_x; 57 | float dist_correction_y; 58 | float vert_offset_correction; 59 | float horiz_offset_correction; 60 | int max_intensity; 61 | int min_intensity; 62 | float focal_distance; 63 | float focal_slope; 64 | 65 | /** cached values calculated when the calibration file is read */ 66 | float cos_rot_correction; ///< cosine of rot_correction 67 | float sin_rot_correction; ///< sine of rot_correction 68 | float cos_vert_correction; ///< cosine of vert_correction 69 | float sin_vert_correction; ///< sine of vert_correction 70 | 71 | int laser_ring; ///< ring number for this laser 72 | }; 73 | 74 | /** \brief Calibration information for the entire device. */ 75 | class Calibration { 76 | public: 77 | float distance_resolution_m; 78 | std::map laser_corrections_map; 79 | std::vector laser_corrections; 80 | int num_lasers; 81 | bool initialized; 82 | bool ros_info; 83 | 84 | public: 85 | explicit Calibration(bool info = true) 86 | : distance_resolution_m(0.002f), num_lasers(0), initialized(false), ros_info(info) {} 87 | explicit Calibration(const std::string &calibration_file, bool info = true) 88 | : distance_resolution_m(0.002f), ros_info(info) { 89 | read(calibration_file); 90 | } 91 | 92 | void read(const std::string &calibration_file); 93 | void write(const std::string &calibration_file); 94 | }; 95 | 96 | } // namespace velodyne_decoder 97 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/config.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #pragma once 5 | 6 | #define _USE_MATH_DEFINES 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace velodyne_decoder { 13 | 14 | struct Config { 15 | // PacketDecoder params 16 | std::string model; 17 | std::string calibration_file; ///< calibration file path 18 | float min_range = 0.1; ///< minimum range to publish 19 | float max_range = 200; ///< maximum range to publish 20 | int min_angle = 0; ///< minimum angle to publish 21 | int max_angle = 36000; ///< maximum angle to publish 22 | 23 | // ScanDecoder params 24 | double rpm = -1; ///< device rotation rate 25 | bool timestamp_first_packet = false; ///< whether we are timestamping based on 26 | ///< the first or last packet in the scan 27 | bool gps_time = false; ///< true: use the packet's time field, 28 | ///< false: use the time of arrival 29 | 30 | void setMinAngleDeg(double min_angle_) { min_angle = std::lround(min_angle_ * 100); } 31 | double getMinAngleDeg() const { return static_cast(min_angle) / 100; } 32 | void setMaxAngleDeg(double max_angle_) { max_angle = std::lround(max_angle_ * 100); } 33 | double getMaxAngleDeg() const { return static_cast(max_angle) / 100; } 34 | 35 | /// Takes care of model name aliases and deprecations 36 | static std::string standardizeModelId(const std::string &model) { 37 | if (model == "VLS-128") // deprecated in favor of "Alpha Prime" 38 | return "Alpha Prime"; 39 | if (model == "HDL-64E_S2.1") 40 | return "HDL-64E_S2"; 41 | return model; 42 | } 43 | 44 | static const std::vector SUPPORTED_MODELS; 45 | static const std::vector TIMINGS_AVAILABLE; 46 | }; 47 | 48 | inline const std::vector Config::SUPPORTED_MODELS = // 49 
| {"HDL-32E", "HDL-64E", "HDL-64E_S2", "HDL-64E_S3", "VLP-16", "VLP-32C", "Alpha Prime"}; 50 | 51 | inline const std::vector Config::TIMINGS_AVAILABLE = // 52 | {"HDL-32E", "VLP-16", "VLP-32C", "Alpha Prime"}; 53 | 54 | } // namespace velodyne_decoder 55 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/packet_decoder.h: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2007-2021 Yaxin Liu, Patrick Beeson, Jack O'Quin, Joshua Whitley, Martin Valgur 2 | // All rights reserved. 3 | // 4 | // Software License Agreement (BSD License 2.0) 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions 8 | // are met: 9 | // 10 | // * Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // * Redistributions in binary form must reproduce the above 13 | // copyright notice, this list of conditions and the following 14 | // disclaimer in the documentation and/or other materials provided 15 | // with the distribution. 16 | // * Neither the name of {copyright_holder} nor the names of its 17 | // contributors may be used to endorse or promote products derived 18 | // from this software without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 | // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 | // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 | // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 | // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 | // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 | // POSSIBILITY OF SUCH DAMAGE. 32 | 33 | /** @file 34 | * 35 | * @brief Interfaces for interpreting raw packets from the Velodyne 3D LIDAR. 
36 | * 37 | * @author Yaxin Liu 38 | * @author Patrick Beeson 39 | * @author Jack O'Quin 40 | */ 41 | 42 | #pragma once 43 | 44 | #define _USE_MATH_DEFINES 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | 51 | #include "velodyne_decoder/calibration.h" 52 | #include "velodyne_decoder/config.h" 53 | #include "velodyne_decoder/pointcloud_aggregator.h" 54 | #include "velodyne_decoder/types.h" 55 | 56 | namespace velodyne_decoder { 57 | 58 | /** \brief Velodyne data conversion class */ 59 | class PacketDecoder { 60 | public: 61 | explicit PacketDecoder(const Config &config); 62 | 63 | void unpack(const VelodynePacket &pkt, PointCloudAggregator &data, Time scan_start_time); 64 | 65 | void setParameters(double min_range, double max_range, double view_direction, double view_width); 66 | 67 | int scansPerPacket() const; 68 | 69 | /** configuration parameters */ 70 | Config config_; 71 | 72 | /** calibration file */ 73 | velodyne_decoder::Calibration calibration_; 74 | 75 | protected: 76 | float sin_rot_table_[ROTATION_MAX_UNITS]; 77 | float cos_rot_table_[ROTATION_MAX_UNITS]; 78 | 79 | // Caches the azimuth percent offset for the VLS-128 laser firings 80 | float vls_128_laser_azimuth_cache[16]; 81 | 82 | // timing offset lookup table 83 | std::vector> timing_offsets_; 84 | 85 | /** \brief setup per-point timing offsets 86 | * 87 | * Runs during initialization and determines the firing time for each point in the scan 88 | */ 89 | static std::vector> buildTimings(const std::string &model); 90 | 91 | void setupSinCosCache(); 92 | void setupAzimuthCache(); 93 | 94 | /** add private function to handle the VLP16 **/ 95 | void unpack_vlp16(const VelodynePacket &pkt, PointCloudAggregator &data, 96 | Time scan_start_time) const; 97 | 98 | void unpack_vlp32_vlp64(const VelodynePacket &pkt, PointCloudAggregator &data, 99 | Time scan_start_time) const; 100 | 101 | void unpack_vls128(const VelodynePacket &pkt, PointCloudAggregator &data, 102 | Time scan_start_time) const; 103 | 104 | void unpackPointCommon(PointCloudAggregator &data, const LaserCorrection &corrections, 105 | const raw_measurement_t &measurement, uint16_t azimuth, float time) const; 106 | 107 | /** in-line test whether a point is in range */ 108 | constexpr bool pointInRange(float range) const { 109 | return range >= config_.min_range && range <= config_.max_range; 110 | } 111 | }; 112 | 113 | } // namespace velodyne_decoder 114 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/pointcloud_aggregator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #pragma once 5 | 6 | #include "velodyne_decoder/types.h" 7 | 8 | #include 9 | 10 | namespace velodyne_decoder { 11 | 12 | class PointCloudAggregator { 13 | public: 14 | PointCloudAggregator(float max_range, float min_range, int scans_per_packet) 15 | : max_range(max_range), min_range(min_range), scans_per_packet(scans_per_packet) {} 16 | 17 | float max_range; 18 | float min_range; 19 | int scans_per_packet; 20 | 21 | virtual void init(const std::vector &scan_packets) { 22 | cloud.clear(); 23 | cloud.reserve(scan_packets.size() * scans_per_packet); 24 | } 25 | 26 | void newLine() {} 27 | 28 | void addPoint(float x, float y, float z, uint16_t ring, uint16_t /*azimuth*/, float distance, 29 | float intensity, float time) { 30 | if (pointInRange(distance)) { 31 | 
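/* keep only returns inside [min_range, max_range]; stored as x, y, z, intensity, ring, time */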
cloud.emplace_back(PointXYZIRT{x, y, z, intensity, ring, time}); 32 | } 33 | } 34 | 35 | constexpr bool pointInRange(float range) const { 36 | return range >= min_range && range <= max_range; 37 | } 38 | 39 | PointCloud cloud; 40 | }; 41 | 42 | } /* namespace velodyne_decoder */ 43 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/scan_decoder.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #pragma once 5 | 6 | #include 7 | 8 | #include "velodyne_decoder/config.h" 9 | #include "velodyne_decoder/packet_decoder.h" 10 | #include "velodyne_decoder/types.h" 11 | 12 | namespace velodyne_decoder { 13 | 14 | class ScanDecoder { 15 | public: 16 | explicit ScanDecoder(const Config &config); 17 | 18 | PointCloud decode(const VelodyneScan &scan); 19 | PointCloud decode(Time scan_stamp, const std::vector &scan_packets); 20 | 21 | velodyne_decoder::PacketDecoder packet_decoder_; 22 | velodyne_decoder::PointCloudAggregator cloud_aggregator_; 23 | }; 24 | 25 | } // namespace velodyne_decoder 26 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/stream_decoder.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | 9 | #include "velodyne_decoder/config.h" 10 | #include "velodyne_decoder/scan_decoder.h" 11 | #include "velodyne_decoder/types.h" 12 | 13 | namespace velodyne_decoder { 14 | 15 | class StreamDecoder { 16 | public: 17 | explicit StreamDecoder(const Config &config); 18 | 19 | std::optional> decode(Time stamp, const RawPacketData &packet); 20 | std::optional> decode(const VelodynePacket &packet); 21 | 22 | static int calc_packets_per_scan(const std::string &model, double rpm); 23 | 24 | protected: 25 | Config config_; 26 | ScanDecoder scan_decoder_; 27 | int packets_per_scan_; 28 | std::vector scan_packets_; 29 | }; 30 | 31 | } // namespace velodyne_decoder -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/time_conversion.h: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2019 Matthew Pitropov, Joshua Whitley 2 | // All rights reserved. 3 | // 4 | // Software License Agreement (BSD License 2.0) 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions 8 | // are met: 9 | // 10 | // * Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // * Redistributions in binary form must reproduce the above 13 | // copyright notice, this list of conditions and the following 14 | // disclaimer in the documentation and/or other materials provided 15 | // with the distribution. 16 | // * Neither the name of {copyright_holder} nor the names of its 17 | // contributors may be used to endorse or promote products derived 18 | // from this software without specific prior written permission. 
19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 | // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 | // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 | // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 | // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 | // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 | // POSSIBILITY OF SUCH DAMAGE. 32 | 33 | #pragma once 34 | 35 | using Time = double; 36 | 37 | /** @brief Function used to check that hour assigned to timestamp in conversion is 38 | * correct. Velodyne only returns time since the top of the hour, so if the computer clock 39 | * and the velodyne clock (gps-synchronized) are a little off, there is a chance the wrong 40 | * hour may be associated with the timestamp 41 | * 42 | * @param packet_time timestamp recovered from velodyne 43 | * @param arrival_time time coming from computer's clock 44 | * @return timestamp from velodyne, possibly shifted by 1 hour if the function arguments 45 | * disagree by more than a half-hour. 46 | */ 47 | Time resolveHourAmbiguity(const Time packet_time, const Time arrival_time) { 48 | const int HALFHOUR_TO_SEC = 1800; 49 | Time retval = packet_time; 50 | uint32_t packet_sec = (uint32_t)std::floor(packet_time); 51 | uint32_t arrival_sec = (uint32_t)std::floor(arrival_time); 52 | if (arrival_sec > packet_sec) { 53 | if (arrival_sec - packet_sec > HALFHOUR_TO_SEC) { 54 | retval += 2 * HALFHOUR_TO_SEC; 55 | } 56 | } else if (packet_sec - arrival_sec > HALFHOUR_TO_SEC) { 57 | retval -= 2 * HALFHOUR_TO_SEC; 58 | } 59 | return retval; 60 | } 61 | 62 | Time getPacketTimestamp(const uint8_t *const data, Time arrival_time) { 63 | // time for each packet is a 4 byte uint 64 | // It is the number of microseconds from the top of the hour 65 | uint32_t usecs = (((uint32_t)data[3]) << 24u | // 66 | ((uint32_t)data[2]) << 16u | // 67 | ((uint32_t)data[1]) << 8u | // 68 | ((uint32_t)data[0])); 69 | const int HOUR_TO_SEC = 3600; 70 | uint32_t cur_hour = (uint32_t)std::floor(arrival_time) / HOUR_TO_SEC; 71 | Time packet_time = (cur_hour * HOUR_TO_SEC) + (usecs * 1e-6); 72 | if (arrival_time > 0) { 73 | packet_time = resolveHourAmbiguity(packet_time, arrival_time); 74 | } 75 | return packet_time; 76 | } 77 | -------------------------------------------------------------------------------- /velodyne_decoder-master/include/velodyne_decoder/types.h: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2007-2021 Yaxin Liu, Patrick Beeson, Jack O'Quin, Joshua Whitley, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | namespace velodyne_decoder { 11 | 12 | /** 13 | * Raw Velodyne packet constants and structures. 
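Block sizes, bank headers and firing durations below follow the Velodyne user manuals.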
14 | */ 15 | constexpr int SIZE_BLOCK = 100; 16 | constexpr int SCANS_PER_BLOCK = 32; 17 | 18 | constexpr float ROTATION_RESOLUTION = 0.01f; // [deg] 19 | constexpr uint16_t ROTATION_MAX_UNITS = 36000u; // [deg/100] 20 | 21 | /** @todo make this work for both big and little-endian machines */ 22 | constexpr uint16_t UPPER_BANK = 0xeeff; 23 | constexpr uint16_t LOWER_BANK = 0xddff; 24 | 25 | /** Special Defines for VLP16 support **/ 26 | constexpr int VLP16_FIRINGS_PER_BLOCK = 2; 27 | constexpr int VLP16_SCANS_PER_FIRING = 16; 28 | constexpr float VLP16_BLOCK_TDURATION = 110.592f; // [µs] 29 | constexpr float VLP16_DSR_TOFFSET = 2.304f; // [µs] 30 | constexpr float VLP16_FIRING_TOFFSET = 55.296f; // [µs] 31 | 32 | constexpr int PACKET_SIZE = 1206; 33 | constexpr int BLOCKS_PER_PACKET = 12; 34 | constexpr int PACKET_STATUS_SIZE = 4; 35 | constexpr int SCANS_PER_PACKET = (SCANS_PER_BLOCK * BLOCKS_PER_PACKET); 36 | 37 | /** Special Definitions for VLS-128 / Alpha Prime support **/ 38 | // These are used to detect which bank of 32 lasers is in this block 39 | constexpr uint16_t VLS128_BANK_1 = 0xeeff; 40 | constexpr uint16_t VLS128_BANK_2 = 0xddff; 41 | constexpr uint16_t VLS128_BANK_3 = 0xccff; 42 | constexpr uint16_t VLS128_BANK_4 = 0xbbff; 43 | 44 | constexpr float VLS128_CHANNEL_TDURATION = 2.665f; // [µs] Channels corresponds to one laser firing 45 | constexpr float VLS128_SEQ_TDURATION = 46 | 53.3f; // [µs] Sequence is a set of laser firings including recharging 47 | constexpr float VLS128_TOH_ADJUSTMENT = 48 | 8.7f; // [µs] μs. Top Of the Hour is aligned with the fourth firing group in a firing sequence. 49 | constexpr float VLS128_DISTANCE_RESOLUTION = 0.004f; // [m] 50 | constexpr uint8_t VLS128_MODEL_ID = 161; 51 | 52 | #pragma pack(push, 1) 53 | struct raw_measurement_t { 54 | uint16_t distance; 55 | uint8_t intensity; 56 | }; 57 | 58 | /** \brief Raw Velodyne data block. 59 | * 60 | * Each block contains data from either the upper or lower laser 61 | * bank. The device returns three times as many upper bank blocks. 62 | */ 63 | struct raw_block_t { 64 | uint16_t header; ///< UPPER_BANK or LOWER_BANK 65 | uint16_t rotation; ///< 0-35999, divide by 100 to get degrees 66 | raw_measurement_t data[SCANS_PER_BLOCK]; 67 | }; 68 | 69 | /** \brief Raw Velodyne packet. 70 | * 71 | * revolution is described in the device manual as incrementing 72 | * (mod 65536) for each physical turn of the device. Our device 73 | * seems to alternate between two different values every third 74 | * packet. One value increases, the other decreases. 
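The decoder reads the last two status bytes as the return mode and product model ID in order to detect dual-return and VLS-128 / Alpha Prime packets.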
75 | * 76 | * \todo figure out if revolution is only present for one of the 77 | * two types of status fields 78 | * 79 | * status has either a temperature encoding or the microcode level 80 | */ 81 | 82 | struct raw_packet_t { 83 | raw_block_t blocks[BLOCKS_PER_PACKET]; 84 | uint16_t revolution; 85 | uint8_t status[PACKET_STATUS_SIZE]; 86 | }; 87 | #pragma pack(pop) 88 | 89 | using Time = double; 90 | using RawPacketData = std::array; 91 | 92 | struct VelodynePacket { 93 | Time stamp; 94 | RawPacketData data; 95 | 96 | VelodynePacket() = default; 97 | VelodynePacket(Time stamp, const RawPacketData &data) : stamp(stamp), data(data) {} 98 | }; 99 | 100 | struct VelodyneScan { 101 | Time stamp; 102 | std::vector packets; 103 | 104 | VelodyneScan() = default; 105 | VelodyneScan(Time stamp, std::vector packets) 106 | : stamp(stamp), packets(std::move(packets)) {} 107 | }; 108 | 109 | struct alignas(16) PointXYZIRT { 110 | struct alignas(16) { 111 | float x; 112 | float y; 113 | float z; 114 | }; 115 | float intensity; 116 | uint16_t ring; 117 | float time; 118 | 119 | PointXYZIRT() = default; 120 | PointXYZIRT(float x, float y, float z, float intensity, uint16_t ring, float time) 121 | : x(x), y(y), z(z), intensity(intensity), ring(ring), time(time) {} 122 | }; 123 | 124 | using PointCloud = std::vector; 125 | 126 | } // namespace velodyne_decoder 127 | -------------------------------------------------------------------------------- /velodyne_decoder-master/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = velodyne-decoder 3 | version = 2.3.0 4 | author = Martin Valgur 5 | author_email = martin.valgur@gmail.com 6 | url = https://github.com/valgur/velodyne_decoder 7 | description = Decoder for raw Velodyne packet data 8 | long_description = file: README.md 9 | long_description_content_type = text/markdown 10 | license_files = LICENSE 11 | keywords = 12 | Velodyne 13 | pointcloud 14 | PCL 15 | classifiers = 16 | License :: OSI Approved :: BSD License 17 | Programming Language :: Python :: 2 18 | Programming Language :: Python :: 3 19 | Operating System :: OS Independent 20 | 21 | [options] 22 | packages = find: 23 | include_package_data = True 24 | zip_safe = False 25 | install_requires = 26 | numpy 27 | importlib_resources 28 | dpkt 29 | 30 | [options.extras_require] 31 | tests = 32 | pytest 33 | requests 34 | pathlib; python_version < "3" 35 | rosbag 36 | 37 | [options.package_data] 38 | * = *.yml 39 | -------------------------------------------------------------------------------- /velodyne_decoder-master/setup.py: -------------------------------------------------------------------------------- 1 | # based on https://github.com/pybind/cmake_example/blob/master/setup.py 2 | import os 3 | import re 4 | import shlex 5 | import subprocess 6 | import sys 7 | 8 | from setuptools import setup, Extension 9 | from setuptools.command.build_ext import build_ext 10 | 11 | # Convert distutils Windows platform specifiers to CMake -A arguments 12 | PLAT_TO_CMAKE = { 13 | "win32": "Win32", 14 | "win-amd64": "x64", 15 | "win-arm32": "ARM", 16 | "win-arm64": "ARM64", 17 | } 18 | 19 | 20 | # A CMakeExtension needs a sourcedir instead of a file list. 21 | # The name must be the _single_ output extension from the CMake build. 22 | # If you need multiple extensions, see scikit-build. 
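# CMakeExtension only records the project source directory; the actual compilation is delegated to CMake by CMakeBuild below.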
23 | class CMakeExtension(Extension): 24 | def __init__(self, name, sourcedir=""): 25 | Extension.__init__(self, name, sources=[]) 26 | self.sourcedir = os.path.abspath(sourcedir) 27 | 28 | 29 | class CMakeBuild(build_ext): 30 | # Allows additional CMake args to be provided with `setup.py build_ext --cmake-args=...` 31 | user_options = build_ext.user_options + [('cmake-args=', None, "options to pass to CMake")] 32 | 33 | def initialize_options(self): 34 | build_ext.initialize_options(self) 35 | self.cmake_args = None 36 | 37 | def finalize_options(self): 38 | build_ext.finalize_options(self) 39 | if self.cmake_args is None: 40 | self.cmake_args = [] 41 | else: 42 | self.cmake_args = shlex.split(self.cmake_args) 43 | 44 | def build_extension(self, ext): 45 | extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) 46 | 47 | # required for auto-detection & inclusion of auxiliary "native" libs 48 | if not extdir.endswith(os.path.sep): 49 | extdir += os.path.sep 50 | 51 | debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug 52 | cfg = "Debug" if debug else "Release" 53 | 54 | # CMake lets you override the generator - we need to check this. 55 | # Can be set with Conda-Build, for example. 56 | cmake_generator = os.environ.get("CMAKE_GENERATOR", "") 57 | 58 | # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON 59 | # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code 60 | # from Python. 61 | cmake_args = [ 62 | "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir), 63 | "-DPYTHON_EXECUTABLE={}".format(sys.executable), 64 | "-DVERSION_INFO={}".format(self.distribution.get_version()), 65 | "-DCMAKE_BUILD_TYPE={}".format(cfg), # not used on MSVC, but no harm 66 | "-DBUILD_PYTHON=yes", 67 | ] 68 | build_args = [] 69 | # Adding CMake arguments set as environment variable 70 | # (needed e.g. to build for ARM OSx on conda-forge) 71 | if "CMAKE_ARGS" in os.environ: 72 | cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item] 73 | 74 | if self.compiler.compiler_type != "msvc": 75 | # Using Ninja-build since it a) is available as a wheel and b) 76 | # multithreads automatically. MSVC would require all variables be 77 | # exported for Ninja to pick it up, which is a little tricky to do. 78 | # Users can override the generator with CMAKE_GENERATOR in CMake 79 | # 3.15+. 80 | if not cmake_generator: 81 | try: 82 | import ninja # noqa: F401 83 | 84 | cmake_args += ["-GNinja"] 85 | except ImportError: 86 | pass 87 | else: 88 | # Single config generators are handled "normally" 89 | single_config = any(x in cmake_generator for x in {"NMake", "Ninja"}) 90 | 91 | # CMake allows an arch-in-generator style for backward compatibility 92 | contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"}) 93 | 94 | # Specify the arch if using MSVC generator, but only if it doesn't 95 | # contain a backward-compatibility arch spec already in the 96 | # generator name. 
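# (e.g. "Visual Studio 14 2015 Win64" already encodes the architecture, while "Visual Studio 16 2019" needs an explicit -A)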
97 | if not single_config and not contains_arch: 98 | cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]] 99 | 100 | # Multi-config generators have a different way to specify configs 101 | if not single_config: 102 | cmake_args += [ 103 | "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir) 104 | ] 105 | build_args += ["--config", cfg] 106 | 107 | if sys.platform.startswith("darwin"): 108 | # Cross-compile support for macOS - respect ARCHFLAGS if set 109 | archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", "")) 110 | if archs: 111 | cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))] 112 | 113 | # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level 114 | # across all generators. 115 | if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ: 116 | # self.parallel is a Python 3 only way to set parallel jobs by hand 117 | # using -j in the build_ext call, not supported by pip or PyPA-build. 118 | if hasattr(self, "parallel") and self.parallel: 119 | # CMake 3.12+ only. 120 | build_args += ["-j{}".format(self.parallel)] 121 | 122 | if not os.path.exists(self.build_temp): 123 | os.makedirs(self.build_temp) 124 | 125 | cmd = ["cmake", ext.sourcedir] + cmake_args + self.cmake_args 126 | print("Running", " ".join(cmd)) 127 | subprocess.check_call(cmd, cwd=self.build_temp) 128 | 129 | cmd = ["cmake", "--build", "."] + build_args 130 | print("Running", " ".join(cmd)) 131 | subprocess.check_call(cmd, cwd=self.build_temp) 132 | 133 | 134 | setup( 135 | ext_modules=[CMakeExtension("cmake")], 136 | cmdclass={"build_ext": CMakeBuild}, 137 | ) 138 | -------------------------------------------------------------------------------- /velodyne_decoder-master/src/calibration.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * \file calibration.cc 3 | * 4 | * \author Piyush Khandelwal (piyushk@cs.utexas.edu) 5 | * Copyright (C) 2012, Austin Robot Technology, 6 | * The University of Texas at Austin 7 | * 8 | * License: Modified BSD License 9 | */ 10 | 11 | #include "velodyne_decoder/calibration.h" 12 | 13 | #define _USE_MATH_DEFINES 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | namespace YAML { 23 | // The >> operator disappeared in yaml-cpp 0.5, so this function is 24 | // added to provide support for code written under the yaml-cpp 0.3 API. 25 | template void operator>>(const YAML::Node &node, T &i) { i = node.as(); } 26 | } // namespace YAML 27 | 28 | namespace velodyne_decoder { 29 | 30 | constexpr auto NUM_LASERS = "num_lasers"; 31 | constexpr auto DISTANCE_RESOLUTION = "distance_resolution"; 32 | constexpr auto LASERS = "lasers"; 33 | constexpr auto LASER_ID = "laser_id"; 34 | constexpr auto ROT_CORRECTION = "rot_correction"; 35 | constexpr auto VERT_CORRECTION = "vert_correction"; 36 | constexpr auto DIST_CORRECTION = "dist_correction"; 37 | constexpr auto TWO_PT_CORRECTION_AVAILABLE = "two_pt_correction_available"; 38 | constexpr auto DIST_CORRECTION_X = "dist_correction_x"; 39 | constexpr auto DIST_CORRECTION_Y = "dist_correction_y"; 40 | constexpr auto VERT_OFFSET_CORRECTION = "vert_offset_correction"; 41 | constexpr auto HORIZ_OFFSET_CORRECTION = "horiz_offset_correction"; 42 | constexpr auto MAX_INTENSITY = "max_intensity"; 43 | constexpr auto MIN_INTENSITY = "min_intensity"; 44 | constexpr auto FOCAL_DISTANCE = "focal_distance"; 45 | constexpr auto FOCAL_SLOPE = "focal_slope"; 46 | 47 | /** Read calibration for a single laser. 
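Optional fields fall back to defaults: no two-point correction, zero horizontal offset, and an intensity range of [0, 255].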
*/ 48 | void operator>>(const YAML::Node &node, std::pair &correction) { 49 | node[LASER_ID] >> correction.first; 50 | node[ROT_CORRECTION] >> correction.second.rot_correction; 51 | node[VERT_CORRECTION] >> correction.second.vert_correction; 52 | node[DIST_CORRECTION] >> correction.second.dist_correction; 53 | if (node[TWO_PT_CORRECTION_AVAILABLE]) 54 | node[TWO_PT_CORRECTION_AVAILABLE] >> correction.second.two_pt_correction_available; 55 | else 56 | correction.second.two_pt_correction_available = false; 57 | node[DIST_CORRECTION_X] >> correction.second.dist_correction_x; 58 | node[DIST_CORRECTION_Y] >> correction.second.dist_correction_y; 59 | node[VERT_OFFSET_CORRECTION] >> correction.second.vert_offset_correction; 60 | if (node[HORIZ_OFFSET_CORRECTION]) 61 | node[HORIZ_OFFSET_CORRECTION] >> correction.second.horiz_offset_correction; 62 | else 63 | correction.second.horiz_offset_correction = 0; 64 | 65 | std::optional max_intensity_node; 66 | if (node[MAX_INTENSITY]) { 67 | *max_intensity_node = node[MAX_INTENSITY]; 68 | } 69 | if (max_intensity_node) { 70 | float max_intensity_float; 71 | *max_intensity_node >> max_intensity_float; 72 | correction.second.max_intensity = std::floor(max_intensity_float); 73 | } else { 74 | correction.second.max_intensity = 255; 75 | } 76 | 77 | std::optional min_intensity_node; 78 | if (node[MIN_INTENSITY]) { 79 | *min_intensity_node = node[MIN_INTENSITY]; 80 | } 81 | if (min_intensity_node) { 82 | float min_intensity_float; 83 | *min_intensity_node >> min_intensity_float; 84 | correction.second.min_intensity = std::floor(min_intensity_float); 85 | } else { 86 | correction.second.min_intensity = 0; 87 | } 88 | node[FOCAL_DISTANCE] >> correction.second.focal_distance; 89 | node[FOCAL_SLOPE] >> correction.second.focal_slope; 90 | 91 | // Calculate cached values 92 | correction.second.cos_rot_correction = cosf(correction.second.rot_correction); 93 | correction.second.sin_rot_correction = sinf(correction.second.rot_correction); 94 | correction.second.cos_vert_correction = cosf(correction.second.vert_correction); 95 | correction.second.sin_vert_correction = sinf(correction.second.vert_correction); 96 | 97 | correction.second.laser_ring = 0; // clear initially (set later) 98 | } 99 | 100 | /** Read entire calibration file. */ 101 | void operator>>(const YAML::Node &node, Calibration &calibration) { 102 | int num_lasers; 103 | node[NUM_LASERS] >> num_lasers; 104 | float distance_resolution_m; 105 | node[DISTANCE_RESOLUTION] >> distance_resolution_m; 106 | const YAML::Node &lasers = node[LASERS]; 107 | calibration.laser_corrections.clear(); 108 | calibration.num_lasers = num_lasers; 109 | calibration.distance_resolution_m = distance_resolution_m; 110 | calibration.laser_corrections.resize(num_lasers); 111 | for (int i = 0; i < num_lasers; i++) { 112 | std::pair correction; 113 | lasers[i] >> correction; 114 | const int index = correction.first; 115 | if (index >= calibration.laser_corrections.size()) { 116 | calibration.laser_corrections.resize(index + 1); 117 | } 118 | calibration.laser_corrections[index] = (correction.second); 119 | calibration.laser_corrections_map.insert(correction); 120 | } 121 | 122 | // For each laser ring, find the next-smallest vertical angle. 123 | // 124 | // This implementation is simple, but not efficient. That is OK, 125 | // since it only runs while starting up. 
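// Rings are numbered in order of increasing vertical angle, so ring 0 is the lowest-pointing laser.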
126 | double next_angle = -std::numeric_limits::infinity(); 127 | for (int ring = 0; ring < num_lasers; ++ring) { 128 | 129 | // find minimum remaining vertical offset correction 130 | double min_seen = std::numeric_limits::infinity(); 131 | int next_index = num_lasers; 132 | for (int j = 0; j < num_lasers; ++j) { 133 | 134 | double angle = calibration.laser_corrections[j].vert_correction; 135 | if (next_angle < angle && angle < min_seen) { 136 | min_seen = angle; 137 | next_index = j; 138 | } 139 | } 140 | 141 | if (next_index < num_lasers) { // anything found in this ring? 142 | // store this ring number with its corresponding laser number 143 | calibration.laser_corrections[next_index].laser_ring = ring; 144 | next_angle = min_seen; 145 | } 146 | } 147 | } 148 | 149 | YAML::Emitter &operator<<(YAML::Emitter &out, const std::pair correction) { 150 | out << YAML::BeginMap; 151 | out << YAML::Key << LASER_ID << YAML::Value << correction.first; 152 | out << YAML::Key << ROT_CORRECTION << YAML::Value << correction.second.rot_correction; 153 | out << YAML::Key << VERT_CORRECTION << YAML::Value << correction.second.vert_correction; 154 | out << YAML::Key << DIST_CORRECTION << YAML::Value << correction.second.dist_correction; 155 | out << YAML::Key << TWO_PT_CORRECTION_AVAILABLE << YAML::Value 156 | << correction.second.two_pt_correction_available; 157 | out << YAML::Key << DIST_CORRECTION_X << YAML::Value << correction.second.dist_correction_x; 158 | out << YAML::Key << DIST_CORRECTION_Y << YAML::Value << correction.second.dist_correction_y; 159 | out << YAML::Key << VERT_OFFSET_CORRECTION << YAML::Value 160 | << correction.second.vert_offset_correction; 161 | out << YAML::Key << HORIZ_OFFSET_CORRECTION << YAML::Value 162 | << correction.second.horiz_offset_correction; 163 | out << YAML::Key << MAX_INTENSITY << YAML::Value << correction.second.max_intensity; 164 | out << YAML::Key << MIN_INTENSITY << YAML::Value << correction.second.min_intensity; 165 | out << YAML::Key << FOCAL_DISTANCE << YAML::Value << correction.second.focal_distance; 166 | out << YAML::Key << FOCAL_SLOPE << YAML::Value << correction.second.focal_slope; 167 | out << YAML::EndMap; 168 | return out; 169 | } 170 | 171 | YAML::Emitter &operator<<(YAML::Emitter &out, const Calibration &calibration) { 172 | out << YAML::BeginMap; 173 | out << YAML::Key << NUM_LASERS << YAML::Value << calibration.laser_corrections.size(); 174 | out << YAML::Key << DISTANCE_RESOLUTION << YAML::Value << calibration.distance_resolution_m; 175 | out << YAML::Key << LASERS << YAML::Value << YAML::BeginSeq; 176 | for (const auto &laser_correction : calibration.laser_corrections_map) { 177 | out << laser_correction; 178 | } 179 | out << YAML::EndSeq; 180 | out << YAML::EndMap; 181 | return out; 182 | } 183 | 184 | void Calibration::read(const std::string &calibration_file) { 185 | std::ifstream fin(calibration_file.c_str()); 186 | if (!fin.is_open()) { 187 | initialized = false; 188 | return; 189 | } 190 | initialized = true; 191 | try { 192 | YAML::Node doc; 193 | fin.close(); 194 | doc = YAML::LoadFile(calibration_file); 195 | doc >> *this; 196 | } catch (YAML::Exception &e) { 197 | std::cerr << "YAML Exception: " << e.what() << std::endl; 198 | initialized = false; 199 | } 200 | fin.close(); 201 | } 202 | 203 | void Calibration::write(const std::string &calibration_file) { 204 | std::ofstream fout(calibration_file.c_str()); 205 | YAML::Emitter out; 206 | out << *this; 207 | fout << out.c_str(); 208 | fout.close(); 209 | } 210 | 211 | } // namespace 
velodyne_decoder 212 | -------------------------------------------------------------------------------- /velodyne_decoder-master/src/packet_decoder.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2007 Austin Robot Technology, Patrick Beeson 3 | * Copyright (C) 2009, 2010, 2012 Austin Robot Technology, Jack O'Quin 4 | * Copyright (C) 2019, Kaarta Inc, Shawn Hanna 5 | * Copyright (C) 2021, Martin Valgur 6 | * 7 | * License: Modified BSD Software License Agreement 8 | */ 9 | 10 | /** 11 | * @file 12 | * 13 | * Velodyne 3D LIDAR data accessor class implementation. 14 | * 15 | * Class for unpacking raw Velodyne LIDAR packets into useful 16 | * formats. 17 | * 18 | * Derived classes accept raw Velodyne data for either single packets 19 | * or entire rotations, and provide it in various formats for either 20 | * on-line or off-line processing. 21 | * 22 | * @author Patrick Beeson 23 | * @author Jack O'Quin 24 | * @author Shawn Hanna 25 | * 26 | * HDL-64E S2 calibration support provided by Nick Hillier 27 | */ 28 | 29 | #include "velodyne_decoder/packet_decoder.h" 30 | 31 | #define _USE_MATH_DEFINES 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | namespace velodyne_decoder { 39 | template constexpr T SQR(T val) { return val * val; } 40 | constexpr float nan = std::numeric_limits::quiet_NaN(); 41 | 42 | PacketDecoder::PacketDecoder(const Config &config) : config_(config) { 43 | if (config_.model.empty()) { 44 | throw std::invalid_argument("No Velodyne sensor model specified!"); 45 | } 46 | 47 | timing_offsets_ = buildTimings(config_.model); 48 | 49 | // get path to angles.config file for this device 50 | if (config_.calibration_file.empty()) { 51 | throw std::invalid_argument("Calibration config file not provided "); 52 | } 53 | 54 | calibration_.read(config_.calibration_file); 55 | if (!calibration_.initialized) { 56 | throw std::runtime_error("Unable to open calibration file: " + config_.calibration_file); 57 | } 58 | 59 | setupSinCosCache(); 60 | setupAzimuthCache(); 61 | } 62 | 63 | /** Update parameters: conversions and update */ 64 | void PacketDecoder::setParameters(double min_range, double max_range, double view_direction, 65 | double view_width) { 66 | config_.min_range = min_range; 67 | config_.max_range = max_range; 68 | 69 | // converting angle parameters into the velodyne reference (rad) 70 | double tmp_min_angle = view_direction + view_width / 2; 71 | double tmp_max_angle = view_direction - view_width / 2; 72 | 73 | // computing positive modulo to keep these angles within [0;2*M_PI] 74 | tmp_min_angle = fmod(fmod(tmp_min_angle, 2 * M_PI) + 2 * M_PI, 2 * M_PI); 75 | tmp_max_angle = fmod(fmod(tmp_max_angle, 2 * M_PI) + 2 * M_PI, 2 * M_PI); 76 | 77 | // converting into the hardware velodyne ref (negative yaml and degrees) 78 | // adding 0.5 performs a centered double to int conversion 79 | config_.min_angle = 100 * (2 * M_PI - tmp_min_angle) * 180 / M_PI + 0.5; 80 | config_.max_angle = 100 * (2 * M_PI - tmp_max_angle) * 180 / M_PI + 0.5; 81 | if (config_.min_angle == config_.max_angle) { 82 | // avoid returning empty cloud if min_angle = max_angle 83 | config_.min_angle = 0; 84 | config_.max_angle = 36000; 85 | } 86 | } 87 | 88 | int PacketDecoder::scansPerPacket() const { 89 | if (calibration_.num_lasers == 16) { 90 | return BLOCKS_PER_PACKET * VLP16_FIRINGS_PER_BLOCK * VLP16_SCANS_PER_FIRING; 91 | } else { 92 | return BLOCKS_PER_PACKET * SCANS_PER_BLOCK; 93 | } 94 | } 95 | 96 | /** 97 | * 
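Firing-time offsets are taken from the timing tables in the Velodyne user manuals.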
Build a timing table for each block/firing. 98 | */ 99 | std::vector> PacketDecoder::buildTimings(const std::string &model) { 100 | std::vector> timing_offsets; 101 | if (model == "VLP-16") { 102 | // timing table calculation, from velodyne user manual 103 | timing_offsets.resize(12); 104 | for (auto &timing_offset : timing_offsets) { 105 | timing_offset.resize(32); 106 | } 107 | // constants 108 | double full_firing_cycle = 55.296 * 1e-6; // seconds 109 | double single_firing = 2.304 * 1e-6; // seconds 110 | bool dual_mode = false; 111 | // compute timing offsets 112 | for (size_t x = 0; x < timing_offsets.size(); ++x) { 113 | for (size_t y = 0; y < timing_offsets[x].size(); ++y) { 114 | double dataBlockIndex = 2 * (dual_mode ? x / 2 : x) + y / 16; 115 | double dataPointIndex = y % 16; 116 | // timing_offsets[block][firing] 117 | timing_offsets[x][y] = 118 | (full_firing_cycle * dataBlockIndex) + (single_firing * dataPointIndex); 119 | } 120 | } 121 | } else if (model == "VLP-32C") { 122 | // timing table calculation, from velodyne user manual 123 | timing_offsets.resize(12); 124 | for (auto &timing_offset : timing_offsets) { 125 | timing_offset.resize(32); 126 | } 127 | // constants 128 | double full_firing_cycle = 55.296 * 1e-6; // seconds 129 | double single_firing = 2.304 * 1e-6; // seconds 130 | bool dual_mode = false; 131 | // compute timing offsets 132 | for (size_t x = 0; x < timing_offsets.size(); ++x) { 133 | for (size_t y = 0; y < timing_offsets[x].size(); ++y) { 134 | double dataBlockIndex = dual_mode ? x / 2 : x; 135 | double dataPointIndex = y / 2; 136 | timing_offsets[x][y] = 137 | (full_firing_cycle * dataBlockIndex) + (single_firing * dataPointIndex); 138 | } 139 | } 140 | } else if (model == "HDL-32E") { 141 | // timing table calculation, from velodyne user manual 142 | timing_offsets.resize(12); 143 | for (auto &timing_offset : timing_offsets) { 144 | timing_offset.resize(32); 145 | } 146 | // constants 147 | double full_firing_cycle = 46.080 * 1e-6; // seconds 148 | double single_firing = 1.152 * 1e-6; // seconds 149 | bool dual_mode = false; 150 | // compute timing offsets 151 | for (size_t x = 0; x < timing_offsets.size(); ++x) { 152 | for (size_t y = 0; y < timing_offsets[x].size(); ++y) { 153 | double dataBlockIndex = dual_mode ? 
x / 2 : x; 154 | double dataPointIndex = y / 2; 155 | timing_offsets[x][y] = 156 | (full_firing_cycle * dataBlockIndex) + (single_firing * dataPointIndex); 157 | } 158 | } 159 | } else if (model == "Alpha Prime") { 160 | timing_offsets.resize(3); 161 | for (auto &timing_offset : timing_offsets) { 162 | timing_offset.resize(17); // 17 (+1 for the maintenance time after firing group 8) 163 | } 164 | 165 | double full_firing_cycle = VLS128_SEQ_TDURATION * 1e-6; // seconds 166 | double single_firing = VLS128_CHANNEL_TDURATION * 1e-6; // seconds 167 | double offset_paket_time = VLS128_TOH_ADJUSTMENT * 1e-6; // seconds 168 | // Compute timing offsets 169 | for (size_t x = 0; x < timing_offsets.size(); ++x) { 170 | for (size_t y = 0; y < timing_offsets[x].size(); ++y) { 171 | double sequenceIndex = x; 172 | double firingGroupIndex = y; 173 | timing_offsets[x][y] = (full_firing_cycle * sequenceIndex) + 174 | (single_firing * firingGroupIndex) - offset_paket_time; 175 | } 176 | } 177 | } else if (std::find(Config::SUPPORTED_MODELS.begin(), Config::SUPPORTED_MODELS.end(), model) == 178 | Config::SUPPORTED_MODELS.end()) { 179 | throw std::runtime_error("Unsupported Velodyne model: " + model); 180 | } else { 181 | // TODO: add require_timings 182 | // throw std::runtime_error("Timings not available for Velodyne model " + model); 183 | } 184 | return timing_offsets; 185 | } 186 | 187 | void PacketDecoder::setupSinCosCache() { 188 | // Set up cached values for sin and cos of all the possible headings 189 | constexpr float deg2rad = M_PI / 180.f; 190 | for (uint16_t rot_index = 0; rot_index < ROTATION_MAX_UNITS; ++rot_index) { 191 | float rotation = ROTATION_RESOLUTION * rot_index * deg2rad; 192 | cos_rot_table_[rot_index] = cosf(rotation); 193 | sin_rot_table_[rot_index] = sinf(rotation); 194 | } 195 | } 196 | 197 | void PacketDecoder::setupAzimuthCache() { 198 | if (config_.model == "Alpha Prime") { 199 | for (uint8_t i = 0; i < 16; i++) { 200 | vls_128_laser_azimuth_cache[i] = 201 | (VLS128_CHANNEL_TDURATION / VLS128_SEQ_TDURATION) * (i + i / 8); 202 | } 203 | } 204 | } 205 | 206 | /** @brief convert raw packet to point cloud 207 | * 208 | * @param pkt raw packet to unpack 209 | * @param pc shared pointer to point cloud (points are appended) 210 | */ 211 | void PacketDecoder::unpack(const VelodynePacket &pkt, PointCloudAggregator &data, 212 | Time scan_start_time) { 213 | /** special parsing for the VLS-128 and Alpha Prime **/ 214 | if (pkt.data[1205] == VLS128_MODEL_ID) { 215 | unpack_vls128(pkt, data, scan_start_time); 216 | return; 217 | } 218 | 219 | /** special parsing for the VLP16 **/ 220 | if (calibration_.num_lasers == 16) { 221 | unpack_vlp16(pkt, data, scan_start_time); 222 | return; 223 | } 224 | 225 | unpack_vlp32_vlp64(pkt, data, scan_start_time); 226 | } 227 | 228 | /** @brief convert raw VLP16 packet to point cloud 229 | * 230 | * @param pkt raw packet to unpack 231 | * @param pc shared pointer to point cloud (points are appended) 232 | */ 233 | void PacketDecoder::unpack_vlp16(const VelodynePacket &pkt, PointCloudAggregator &data, 234 | Time scan_start_time) const { 235 | auto *raw = reinterpret_cast(&pkt.data[0]); 236 | 237 | float time_diff_start_to_this_packet = pkt.stamp - scan_start_time; 238 | 239 | float last_azimuth_diff = 0; 240 | 241 | for (int i = 0; i < BLOCKS_PER_PACKET; i++) { 242 | const auto &block = raw->blocks[i]; 243 | 244 | // ignore packets with mangled or otherwise different contents 245 | if (UPPER_BANK != block.header) { 246 | return; // bad packet: skip the 
rest 247 | } 248 | 249 | // Calculate difference between current and next block's azimuth angle. 250 | float azimuth = block.rotation; 251 | float azimuth_diff = last_azimuth_diff; 252 | if (i < (BLOCKS_PER_PACKET - 1)) { 253 | int raw_azimuth_diff = raw->blocks[i + 1].rotation - block.rotation; 254 | azimuth_diff = (float)((36000 + raw_azimuth_diff) % 36000); 255 | // some packets contain an angle overflow where azimuth_diff < 0 256 | if (raw_azimuth_diff < 0) { 257 | // if last_azimuth_diff was not zero, we can assume that the velodyne's speed did not change 258 | // very much and use the same difference 259 | if (last_azimuth_diff > 0) { 260 | azimuth_diff = last_azimuth_diff; 261 | } 262 | // otherwise we are not able to use this data 263 | // TODO: we might just not use the second 16 firings 264 | else { 265 | continue; 266 | } 267 | } 268 | last_azimuth_diff = azimuth_diff; 269 | } 270 | 271 | for (int firing = 0, j = 0; firing < VLP16_FIRINGS_PER_BLOCK; firing++) { 272 | for (int dsr = 0; dsr < VLP16_SCANS_PER_FIRING; dsr++, j++) { 273 | /** correct for the laser rotation as a function of timing during the firings **/ 274 | float azimuth_corrected_f = 275 | azimuth + 276 | (azimuth_diff * ((dsr * VLP16_DSR_TOFFSET) + (firing * VLP16_FIRING_TOFFSET)) / 277 | VLP16_BLOCK_TDURATION); 278 | int azimuth_corrected = std::lround(azimuth_corrected_f) % 36000; 279 | 280 | /*condition added to avoid calculating points which are not 281 | in the interesting defined area (min_angle < area < max_angle)*/ 282 | if (!((config_.min_angle < config_.max_angle && (azimuth_corrected >= config_.min_angle && 283 | azimuth_corrected <= config_.max_angle)) || 284 | (config_.min_angle > config_.max_angle && 285 | (azimuth_corrected >= config_.min_angle || azimuth_corrected <= config_.max_angle)))) 286 | continue; 287 | 288 | float time = 0; 289 | if (!timing_offsets_.empty()) 290 | time = timing_offsets_[i][firing * 16 + dsr] + time_diff_start_to_this_packet; 291 | 292 | const auto &corrections = calibration_.laser_corrections[dsr]; 293 | const auto &measurement = block.data[j]; 294 | unpackPointCommon(data, corrections, measurement, azimuth_corrected, time); 295 | } 296 | data.newLine(); 297 | } 298 | } 299 | } 300 | 301 | /** @brief convert raw VLP-32/64 packet to point cloud 302 | */ 303 | void PacketDecoder::unpack_vlp32_vlp64(const VelodynePacket &pkt, PointCloudAggregator &data, 304 | Time scan_start_time) const { 305 | auto *raw = reinterpret_cast(&pkt.data[0]); 306 | 307 | float time_diff_start_to_this_packet = pkt.stamp - scan_start_time; 308 | 309 | for (int i = 0; i < BLOCKS_PER_PACKET; i++) { 310 | const auto &block = raw->blocks[i]; 311 | 312 | // upper bank lasers are numbered [0..31] 313 | // lower bank lasers are [32..63] 314 | // NOTE: this is a change from the old velodyne_common implementation 315 | int bank_origin = (block.header == UPPER_BANK) ? 
0 : 32; 316 | 317 | uint16_t azimuth = block.rotation; 318 | 319 | /*condition added to avoid calculating points which are not 320 | in the interesting defined area (min_angle < area < max_angle)*/ 321 | if (!((config_.min_angle < config_.max_angle && 322 | (azimuth >= config_.min_angle && azimuth <= config_.max_angle)) || 323 | (config_.min_angle > config_.max_angle && 324 | (azimuth >= config_.min_angle || azimuth <= config_.max_angle)))) 325 | continue; 326 | 327 | for (int j = 0; j < SCANS_PER_BLOCK; j++) { 328 | const uint8_t laser_number = j + bank_origin; 329 | 330 | float time = 0; 331 | if (!timing_offsets_.empty()) { 332 | time = timing_offsets_[i][j] + time_diff_start_to_this_packet; 333 | } 334 | 335 | const auto &corrections = calibration_.laser_corrections[laser_number]; 336 | const auto &measurement = block.data[j]; 337 | unpackPointCommon(data, corrections, measurement, azimuth, time); 338 | } 339 | data.newLine(); 340 | } 341 | } 342 | 343 | /** @brief Point decoding logic common to VLP-16/32/64 344 | */ 345 | void PacketDecoder::unpackPointCommon(PointCloudAggregator &data, 346 | const LaserCorrection &corrections, 347 | const raw_measurement_t &measurement, uint16_t azimuth, 348 | float time) const { 349 | /** Position Calculation */ 350 | if (measurement.distance == 0) // no valid laser beam return 351 | { 352 | // call to addPoint is still required since output could be organized 353 | data.addPoint(nan, nan, nan, corrections.laser_ring, azimuth, nan, nan, time); 354 | return; 355 | } 356 | float distance = measurement.distance * calibration_.distance_resolution_m; 357 | distance += corrections.dist_correction; 358 | 359 | float cos_vert_angle = corrections.cos_vert_correction; 360 | float sin_vert_angle = corrections.sin_vert_correction; 361 | float cos_rot_correction = corrections.cos_rot_correction; 362 | float sin_rot_correction = corrections.sin_rot_correction; 363 | 364 | // cos(a-b) = cos(a)*cos(b) + sin(a)*sin(b) 365 | // sin(a-b) = sin(a)*cos(b) - cos(a)*sin(b) 366 | float cos_rot_angle = cos_rot_table_[azimuth] * cos_rot_correction + // 367 | sin_rot_table_[azimuth] * sin_rot_correction; 368 | float sin_rot_angle = sin_rot_table_[azimuth] * cos_rot_correction - // 369 | cos_rot_table_[azimuth] * sin_rot_correction; 370 | 371 | float horiz_offset = corrections.horiz_offset_correction; 372 | float vert_offset = corrections.vert_offset_correction; 373 | 374 | // Compute the distance in the xy plane (w/o accounting for rotation) 375 | /**the new term of 'vert_offset * sin_vert_angle' 376 | * was added to the expression due to the mathematical 377 | * model we used. 378 | */ 379 | float xy_distance = distance * cos_vert_angle - vert_offset * sin_vert_angle; 380 | 381 | // Calculate temporal X, use absolute value. 
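// (the absolute values are only used to interpolate the two-point distance correction below)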
382 | float xx = std::abs(xy_distance * sin_rot_angle - horiz_offset * cos_rot_angle); 383 | // Calculate temporal Y, use absolute value 384 | float yy = std::abs(xy_distance * cos_rot_angle + horiz_offset * sin_rot_angle); 385 | 386 | // Get 2points calibration values,Linear interpolation to get distance 387 | // correction for X and Y, that means distance correction use 388 | // different value at different distance 389 | float distance_corr_x = 0; 390 | float distance_corr_y = 0; 391 | if (corrections.two_pt_correction_available) { 392 | distance_corr_x = (corrections.dist_correction - corrections.dist_correction_x) * (xx - 2.4f) / 393 | (25.04f - 2.4f) + 394 | corrections.dist_correction_x; 395 | distance_corr_x -= corrections.dist_correction; 396 | distance_corr_y = (corrections.dist_correction - corrections.dist_correction_y) * (yy - 1.93f) / 397 | (25.04f - 1.93f) + 398 | corrections.dist_correction_y; 399 | distance_corr_y -= corrections.dist_correction; 400 | } 401 | 402 | float distance_x = distance + distance_corr_x; 403 | /**the new term of 'vert_offset * sin_vert_angle' 404 | * was added to the expression due to the mathematical 405 | * model we used. 406 | */ 407 | xy_distance = distance_x * cos_vert_angle - vert_offset * sin_vert_angle; 408 | /// the expression with '-' is proved to be better than the one with '+' 409 | float x = xy_distance * sin_rot_angle - horiz_offset * cos_rot_angle; 410 | 411 | float distance_y = distance + distance_corr_y; 412 | /**the new term of 'vert_offset * sin_vert_angle' 413 | * was added to the expression due to the mathematical 414 | * model we used. 415 | */ 416 | xy_distance = distance_y * cos_vert_angle - vert_offset * sin_vert_angle; 417 | float y = xy_distance * cos_rot_angle + horiz_offset * sin_rot_angle; 418 | 419 | // Using distance_y is not symmetric, but the velodyne manual 420 | // does this. 421 | /**the new term of 'vert_offset * cos_vert_angle' 422 | * was added to the expression due to the mathematical 423 | * model we used. 424 | */ 425 | float z = distance_y * sin_vert_angle + vert_offset * cos_vert_angle; 426 | 427 | /** Use standard ROS coordinate system (right-hand rule) */ 428 | float x_coord = y; 429 | float y_coord = -x; 430 | float z_coord = z; 431 | 432 | /** Intensity Calculation */ 433 | 434 | float min_intensity = corrections.min_intensity; 435 | float max_intensity = corrections.max_intensity; 436 | 437 | float focal_offset = SQR(1 - corrections.focal_distance / 13100.f); 438 | float raw_intensity = measurement.intensity; 439 | float raw_distance = measurement.distance; 440 | float intensity = raw_intensity + 256 * corrections.focal_slope * 441 | std::abs(focal_offset - SQR(1 - raw_distance / 65535.f)); 442 | intensity = (intensity < min_intensity) ? min_intensity : intensity; 443 | intensity = (intensity > max_intensity) ? 
max_intensity : intensity; 444 | 445 | data.addPoint(x_coord, y_coord, z_coord, corrections.laser_ring, azimuth, distance, intensity, 446 | time); 447 | } 448 | 449 | /** @brief convert raw VLS-128 / Alpha Prime packet to point cloud 450 | * 451 | * @param pkt raw packet to unpack 452 | * @param pc shared pointer to point cloud (points are appended) 453 | */ 454 | void PacketDecoder::unpack_vls128(const VelodynePacket &pkt, PointCloudAggregator &data, 455 | Time scan_start_time) const { 456 | auto *raw = reinterpret_cast(&pkt.data[0]); 457 | 458 | float time_diff_start_to_this_packet = pkt.stamp - scan_start_time; 459 | 460 | bool dual_return = (pkt.data[1204] == 57); 461 | 462 | uint16_t azimuth_next = raw->blocks[0].rotation; 463 | float last_azimuth_diff = 0; 464 | 465 | for (int block = 0; block < BLOCKS_PER_PACKET - (4 * dual_return); block++) { 466 | // cache block for use 467 | const raw_block_t ¤t_block = raw->blocks[block]; 468 | 469 | int bank_origin = 0; 470 | // Used to detect which bank of 32 lasers is in this block 471 | switch (current_block.header) { 472 | case VLS128_BANK_1: 473 | bank_origin = 0; 474 | break; 475 | case VLS128_BANK_2: 476 | bank_origin = 32; 477 | break; 478 | case VLS128_BANK_3: 479 | bank_origin = 64; 480 | break; 481 | case VLS128_BANK_4: 482 | bank_origin = 96; 483 | break; 484 | default: 485 | return; // bad packet: skip the rest 486 | } 487 | 488 | // Calculate difference between current and next block's azimuth angle. 489 | uint16_t azimuth = azimuth_next; 490 | float azimuth_diff; 491 | if (block < (BLOCKS_PER_PACKET - (1 + dual_return))) { 492 | // Get the next block rotation to calculate how far we rotate between blocks 493 | azimuth_next = raw->blocks[block + (1 + dual_return)].rotation; 494 | 495 | // Finds the difference between two successive blocks 496 | azimuth_diff = (float)((36000 + azimuth_next - azimuth) % 36000); 497 | 498 | // This is used when the last block is next to predict rotation amount 499 | last_azimuth_diff = azimuth_diff; 500 | } else { 501 | // This makes the assumption the difference between the last block and the next packet is the 502 | // same as the last to the second to last. 503 | // Assumes RPM doesn't change much between blocks 504 | azimuth_diff = (block == BLOCKS_PER_PACKET - (4 * dual_return) - 1) ? 
0 : last_azimuth_diff; 505 | } 506 | 507 | // condition added to avoid calculating points which are not in the interesting defined area 508 | // (min_angle < area < max_angle) 509 | if (!((config_.min_angle < config_.max_angle && 510 | (azimuth >= config_.min_angle && azimuth <= config_.max_angle)) || 511 | (config_.min_angle > config_.max_angle && 512 | (azimuth >= config_.min_angle || azimuth <= config_.max_angle)))) 513 | continue; 514 | 515 | for (int j = 0; j < SCANS_PER_BLOCK; j++) { 516 | // distance extraction 517 | uint16_t raw_distance = current_block.data[j].distance; 518 | float distance = raw_distance * VLS128_DISTANCE_RESOLUTION; 519 | 520 | if (pointInRange(distance)) { 521 | uint8_t laser_number = 522 | j + bank_origin; // Offset the laser in this block by which block it's in 523 | uint8_t firing_order = laser_number / 8; // VLS-128 fires 8 lasers at a time 524 | 525 | float time = 0; 526 | if (!timing_offsets_.empty()) { 527 | time = timing_offsets_[block / 4][firing_order + laser_number / 64] + 528 | time_diff_start_to_this_packet; 529 | } 530 | 531 | const velodyne_decoder::LaserCorrection &corrections = 532 | calibration_.laser_corrections[laser_number]; 533 | 534 | // correct for the laser rotation as a function of timing during the firings 535 | float azimuth_corrected_f = 536 | azimuth + (azimuth_diff * vls_128_laser_azimuth_cache[firing_order]); 537 | uint16_t azimuth_corrected = std::lround(azimuth_corrected_f) % 36000; 538 | 539 | // convert polar coordinates to Euclidean XYZ 540 | float cos_vert_angle = corrections.cos_vert_correction; 541 | float sin_vert_angle = corrections.sin_vert_correction; 542 | float cos_rot_correction = corrections.cos_rot_correction; 543 | float sin_rot_correction = corrections.sin_rot_correction; 544 | 545 | // cos(a-b) = cos(a)*cos(b) + sin(a)*sin(b) 546 | // sin(a-b) = sin(a)*cos(b) - cos(a)*sin(b) 547 | float cos_rot_angle = cos_rot_table_[azimuth_corrected] * cos_rot_correction + 548 | sin_rot_table_[azimuth_corrected] * sin_rot_correction; 549 | float sin_rot_angle = sin_rot_table_[azimuth_corrected] * cos_rot_correction - 550 | cos_rot_table_[azimuth_corrected] * sin_rot_correction; 551 | 552 | // Compute the distance in the xy plane (w/o accounting for rotation) 553 | float xy_distance = distance * cos_vert_angle; 554 | 555 | data.addPoint(xy_distance * cos_rot_angle, -(xy_distance * sin_rot_angle), 556 | distance * sin_vert_angle, corrections.laser_ring, azimuth_corrected, 557 | distance, current_block.data[j].intensity, time); 558 | } 559 | } 560 | data.newLine(); 561 | } 562 | } 563 | 564 | } // namespace velodyne_decoder 565 | -------------------------------------------------------------------------------- /velodyne_decoder-master/src/python.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Martin Valgur 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "velodyne_decoder/config.h" 13 | #include "velodyne_decoder/scan_decoder.h" 14 | #include "velodyne_decoder/stream_decoder.h" 15 | #include "velodyne_decoder/types.h" 16 | 17 | namespace py = pybind11; 18 | using namespace velodyne_decoder; 19 | 20 | /** 21 | * Zero-copy conversion to py::array. 
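A py::capsule takes ownership of the heap-allocated container and frees it once the NumPy array is garbage-collected.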
--------------------------------------------------------------------------------
/velodyne_decoder-master/src/scan_decoder.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, Martin Valgur
2 | // SPDX-License-Identifier: BSD-3-Clause
3 | 
4 | #include "velodyne_decoder/scan_decoder.h"
5 | 
6 | namespace velodyne_decoder {
7 | 
8 | ScanDecoder::ScanDecoder(const Config &config)
9 |     : packet_decoder_(config),
10 |       cloud_aggregator_(config.max_range, config.min_range, packet_decoder_.scansPerPacket()) {}
11 | 
12 | PointCloud ScanDecoder::decode(Time scan_stamp, const std::vector<VelodynePacket> &scan_packets) {
13 |   cloud_aggregator_.init(scan_packets);
14 |   for (const auto &packet : scan_packets) {
15 |     packet_decoder_.unpack(packet, cloud_aggregator_, scan_stamp);
16 |   }
17 |   return cloud_aggregator_.cloud;
18 | }
19 | 
20 | inline PointCloud ScanDecoder::decode(const VelodyneScan &scan) {
21 |   return decode(scan.stamp, scan.packets);
22 | }
23 | 
24 | } // namespace velodyne_decoder
25 | 
--------------------------------------------------------------------------------
/velodyne_decoder-master/src/stream_decoder.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2007, 2009-2012 Austin Robot Technology, Patrick Beeson, Jack O'Quin
3 |  * Copyright (C) 2021, Martin Valgur
4 |  *
5 |  * License: Modified BSD Software License Agreement
6 |  */
7 | 
8 | #include "velodyne_decoder/stream_decoder.h"
9 | 
10 | #include <cmath>
11 | #include <stdexcept>
12 | #include <string>
13 | #include <utility>
14 | 
15 | #include "velodyne_decoder/time_conversion.h"
16 | 
17 | namespace velodyne_decoder {
18 | 
19 | StreamDecoder::StreamDecoder(const Config &config) : config_(config), scan_decoder_(config) {
20 |   packets_per_scan_ = calc_packets_per_scan(config_.model, config_.rpm);
21 |   scan_packets_.reserve(packets_per_scan_);
22 | }
23 | 
24 | int StreamDecoder::calc_packets_per_scan(const std::string &model, double rpm) {
25 |   double packet_rate; // packet frequency (Hz)
26 |   if (model == "Alpha Prime") {
27 |     // 3 firing cycles in a data packet. 3 x 53.3 μs = 0.1599 ms is the
28 |     // accumulation delay per packet.
29 |     // 1 packet/0.1599 ms = 6253.9 packets/second
30 |     packet_rate = 6253.9;
31 |   } else if (model == "HDL-64E_S2" || model == "HDL-64E_S2.1") {
32 |     // generates 1333312 points per second
33 |     // 1 packet holds 384 points
34 |     packet_rate = 1333312. / 384.;
35 |   } else if (model == "HDL-64E") {
36 |     packet_rate = 2600.0;
37 |   } else if (model == "HDL-64E_S3") {
38 |     // generates 2222220 points per second (half for strongest and half for latest)
39 |     // 1 packet holds 384 points
40 |     packet_rate = 2222220. / 384.;
41 |   } else if (model == "HDL-32E") {
42 |     packet_rate = 1808.0;
43 |   } else if (model == "VLP-32C") {
44 |     packet_rate = 1507.0;
45 |   } else if (model == "VLP-16") {
46 |     // 754 packets/second in Last or Strongest mode, 1508 in dual-return mode (VLP-16 User Manual)
47 |     packet_rate = 754;
48 |   } else {
49 |     throw std::invalid_argument("Unknown sensor model: " + model);
50 |   }
51 |   if (rpm <= 0) {
52 |     throw std::invalid_argument("Invalid RPM value in config: " + std::to_string(rpm));
53 |   }
54 |   double frequency = rpm / 60.0;
55 |   return static_cast<int>(ceil(packet_rate / frequency));
56 | }
57 | 
58 | std::optional<std::pair<Time, PointCloud>> //
59 | StreamDecoder::decode(Time stamp, const RawPacketData &packet) {
60 |   if (config_.gps_time) {
61 |     stamp = getPacketTimestamp(&(packet[1200]), stamp);
62 |   }
63 |   scan_packets_.emplace_back(stamp, packet);
64 |   if (scan_packets_.size() == packets_per_scan_) {
65 |     Time scan_stamp;
66 |     if (config_.timestamp_first_packet) {
67 |       scan_stamp = scan_packets_.front().stamp;
68 |     } else {
69 |       scan_stamp = scan_packets_.back().stamp;
70 |     }
71 |     PointCloud scan = scan_decoder_.decode(scan_stamp, scan_packets_);
72 |     scan_packets_.clear();
73 |     return std::make_pair(scan_stamp, scan);
74 |   }
75 |   return std::nullopt;
76 | }
77 | 
78 | std::optional<std::pair<Time, PointCloud>> //
79 | StreamDecoder::decode(const VelodynePacket &packet) {
80 |   return decode(packet.stamp, packet.data);
81 | }
82 | 
83 | } // namespace velodyne_decoder
84 |
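The comments in `calc_packets_per_scan` encode per-model packet rates; the function itself reduces to packet_rate divided by the rotation frequency, rounded up. A quick sanity check of that arithmetic in Python, using values taken from the table above (this snippet is illustrative and not part of the library):

```python
import math

def packets_per_scan(packet_rate_hz, rpm):
    # one scan = one full rotation; the sensor completes rpm / 60 rotations per second
    return math.ceil(packet_rate_hz / (rpm / 60.0))

assert packets_per_scan(754, 600) == 76       # VLP-16, single return: ceil(754 / 10)
assert packets_per_scan(6253.9, 600) == 626   # Alpha Prime: ceil(625.39)
assert packets_per_scan(1808, 300) == 362     # HDL-32E spinning at 300 RPM: ceil(1808 / 5)
```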
--------------------------------------------------------------------------------
/velodyne_decoder-master/test/conftest.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | from pathlib import Path
3 | 
4 | import pytest
5 | import requests
6 | 
7 | data_dir = Path(tempfile.tempdir) / "velodyne_decoder_data"
8 | base_url = "https://github.com/valgur/velodyne_decoder/releases/download/v1.0.1/"
9 | 
10 | 
11 | def fetch(name):
12 |     url = base_url + name
13 |     if not data_dir.exists():
14 |         data_dir.mkdir()
15 |     path = data_dir / name
16 |     if not path.exists():
17 |         with path.open("wb") as f:
18 |             f.write(requests.get(url).content)
19 |     return path
20 | 
21 | 
22 | @pytest.fixture
23 | def sample_pcap_path():
24 |     return fetch("vlp16.pcap")
25 | 
26 | 
27 | @pytest.fixture
28 | def sample_bag_path():
29 |     return fetch("vlp16.bag")
30 | 
--------------------------------------------------------------------------------
/velodyne_decoder-master/test/test_velodyne_decoder.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | import velodyne_decoder as vd
4 | 
5 | pcl_struct_dtype = {'names': ['x', 'y', 'z', 'intensity', 'ring', 'time'],
6 |                     'formats': ['<f4', '<f4', '<f4', '<f4', '<u2', '<f4']}
49 |     assert stamp >= t0
50 | 
51 | 
52 | def test_bag_as_contiguous_array(sample_bag_path, config):
53 |     pcds = list(vd.read_bag(sample_bag_path, config, "/velodyne_packets", as_pcl_structs=False))
54 |     assert len(pcds) == 13
55 |     stamp, pcd, topic = pcds[0]
56 |     assert topic == "/velodyne_packets"
57 |     assert stamp.to_sec() == 1636622716.742135
58 |     assert pcd.shape == (27300, 6)
59 |     assert pcd.dtype.name == "float32"
60 | 
61 | 
62 | def test_bag_as_struct_array(sample_bag_path, config):
63 |     pcds = list(vd.read_bag(sample_bag_path, config, "/velodyne_packets", as_pcl_structs=True))
64 |     assert len(pcds) == 13
65 |     stamp, pcd, topic = pcds[0]
66 |     assert topic == "/velodyne_packets"
67 |     assert stamp.to_sec() == 1636622716.742135
68 |     assert pcd.shape == (27300,)
69 |     assert pcd.dtype == pcl_struct_dtype
70 | 
71 | 
72 | def test_bag_automatic_topic(sample_bag_path, config):
73 |     pcds = list(vd.read_bag(sample_bag_path, config))
74 |     assert len(pcds) == 13
75 |     stamp, pcd, topic = pcds[0]
76 |     assert topic == "/velodyne_packets"
77 |     assert stamp.to_sec() == 1636622716.742135
78 |     assert pcd.shape == (27300, 6)
79 | 
80 | 
81 | def test_bag_ros_time(sample_bag_path, config):
82 |     pcds = list(vd.read_bag(sample_bag_path, config, use_header_time=False))
83 |     assert len(pcds) == 13
84 |     stamp, pcd, topic = pcds[0]
85 |     assert topic == "/velodyne_packets"
86 |     assert stamp.to_sec() == 1636622716.7427142
87 |     assert pcd.shape == (27300, 6)
88 | 
89 | 
90 | def test_bag_frame_id(sample_bag_path, config):
91 |     pcds = list(vd.read_bag(sample_bag_path, config, return_frame_id=True))
92 |     stamp, pcd, topic, frame_id = pcds[0]
93 |     assert topic == "/velodyne_packets"
94 |     assert stamp.to_sec() == 1636622716.742135
95 |     assert pcd.shape == (27300, 6)
96 |     assert frame_id == "velodyne"
97 | 
98 | 
99 | def test_bag_time_range(sample_bag_path, config):
100 |     t0 = 1636622717
101 |     pcds = list(vd.read_bag(sample_bag_path, config, time_range=(t0, None)))
102 |     stamp, pcd, topic = pcds[0]
103 |     assert stamp.to_sec() >= t0
104 | 
105 | 
106 | @pytest.mark.parametrize("model_id", vd.Config.SUPPORTED_MODELS)
107 | def test_bundled_calibrations(model_id):
108 |     if model_id in ["HDL-64E_S2", "HDL-64E_S3"]:
109 |         # No default calibration has been provided for these by VeloView
110 |         return
111 |     vd.ScanDecoder(vd.Config(model_id))
112 | 
113 | 
114 | def test_model_renaming():
115 |     config = vd.Config("VLS-128")
116 |     assert config.model == "Alpha Prime"
117 |     config = vd.Config()
118 |     config.model = "VLS-128"
119 |     assert config.model == "Alpha Prime"
120 |
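The bag tests above double as usage documentation for `read_bag`, which is defined in `velodyne_decoder/__init__.py` below. A minimal sketch outside of pytest, contrasting the two output layouts (the bag path here is hypothetical):

```python
import velodyne_decoder as vd

config = vd.Config(model="VLP-16")

# contiguous layout: one (N, 6) float32 array per scan, columns x, y, z, intensity, ring, time
for stamp, points, topic in vd.read_bag("drive.bag", config, "/velodyne_packets"):
    xyz = points[:, :3]

# structured layout: one (N,) record array per scan, addressable by field name
for stamp, points, topic in vd.read_bag("drive.bag", config, "/velodyne_packets",
                                        as_pcl_structs=True):
    rings = points["ring"]
```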
--------------------------------------------------------------------------------
/velodyne_decoder-master/velodyne_decoder/__init__.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | from contextlib import contextmanager
3 | 
4 | import dpkt
5 | import sys
6 | from velodyne_decoder_pylib import *
7 | 
8 | is_py2 = sys.version_info[0] == 2
9 | 
10 | 
11 | def read_pcap(pcap_file, config, as_pcl_structs=False, time_range=(None, None)):
12 |     """Decodes and yields all point clouds stored in a PCAP file.
13 | 
14 |     `model` and `rpm` parameters must be set in the provided config.
15 | 
16 |     Parameters
17 |     ----------
18 |     pcap_file : path or file handle
19 |     config : Config
20 |     as_pcl_structs : bool
21 |         If False, the returned NumPy arrays will be a contiguous array of floats (default).
22 |         If True, the returned NumPy arrays will contain PCL-compatible structs with dtype
23 |         {'names': ['x', 'y', 'z', 'intensity', 'ring', 'time'],
24 |          'formats': ['<f4', '<f4', '<f4', '<f4', '<u2', '<f4']}
40 |             if (start_time is not None and stamp < start_time) or (end_time is not None and stamp > end_time):
41 |                 continue
42 |             data = dpkt.ethernet.Ethernet(buf).data.data.data
43 |             if is_py2:
44 |                 data = bytearray(data)
45 |             if len(data) != PACKET_SIZE:
46 |                 continue
47 |             result = decoder.decode(stamp, data, as_pcl_structs)
48 |             if result is not None:
49 |                 yield ResultTuple(*result)
50 | 
51 | 
52 | def _get_velodyne_scan_topics(bag):
53 |     topics = []
54 |     for topic, info in bag.get_type_and_topic_info()[1].items():
55 |         if info.msg_type == "velodyne_msgs/VelodyneScan":
56 |             topics.append(topic)
57 |     return topics
58 | 
59 | 
60 | def read_bag(bag_file, config, topics=None, as_pcl_structs=False, use_header_time=True,
61 |              return_frame_id=False, time_range=(None, None)):
62 |     """Decodes and yields all point clouds stored in a ROS bag file.
63 | 
64 |     `model` parameter must be set in the provided config.
65 | 
66 |     Parameters
67 |     ----------
68 |     bag_file : path or file handle
69 |     config : Config
70 |     topics : str or list of str
71 |     as_pcl_structs : bool
72 |         If False, the returned NumPy arrays will be a contiguous array of floats (default).
73 |         If True, the returned NumPy arrays will contain PCL-compatible structs with dtype
74 |         {'names': ['x', 'y', 'z', 'intensity', 'ring', 'time'],
75 |          'formats': ['