├── .gitignore ├── LICENSE ├── README.md ├── batch_generation.py ├── config.yml ├── evaluation.py ├── octomap_empty.py ├── octomap_gr.py ├── octomap_knn.py ├── octomap_occupied.py ├── range_image_map.py ├── time_utils.py ├── utility └── compile_res.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Photogrammetry & Robotics Bonn 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dynamic-point-removal 2 | Static Map Generation from 3D LiDAR Point Clouds Exploiting Ground Segmentation 3 | -------------------------------------------------------------------------------- /batch_generation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def get_batches(poses, batch_size): 5 | """ 6 | Creates smaller batches for larger sequences 7 | 8 | Args: 9 | poses (List: nx4x4): Poses for the LiDAR scans 10 | batch_size (int): Size of a single batch in meters 11 | 12 | Returns: 13 | val: List of batches containing the starting and ending index for each batch 14 | length + 100: Batch size plus a 100 m margin, used downstream as the map extent (max_dis) 15 | """ 16 | length = batch_size 17 | 18 | curr_dist = 0 19 | start_idx = 0 20 | val = [] 21 | 22 | prev_pose = [[1.0, 0.0, 0.0, 0.0], 23 | [0.0, 1.0, 0.0, 0.0], 24 | [0.0, 0.0, 1.0, 0.0], 25 | [0.0, 0.0, 0.0, 1.0]] 26 | 27 | for i in range(len(poses)): 28 | curr_pose = poses[i] 29 | dist = np.sqrt((curr_pose[0][3] - prev_pose[0][3]) ** 2 + (curr_pose[1][3] - prev_pose[1][3]) ** 2 + ( 30 | curr_pose[2][3] - prev_pose[2][3]) ** 2) 31 | curr_dist = curr_dist + dist 32 | 33 | if curr_dist >= length: 34 | end_idx = i 35 | val.append([start_idx, end_idx]) 36 | start_idx = i + 1 37 | curr_dist = 0 38 | 39 | prev_pose = curr_pose 40 | val.append([start_idx, len(poses)]) 41 | return val, length + 100 42 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | Dataset: 2 | # Batch size (in meters) for longer sequences 3 | batch_size: 300 4 | 5 | # Please change the following paths 6 | path_to_calibration: /path/to/calib 7 | path_to_gt: /path/to/gt 8 | path_to_poses: /path/to/poses 9 | path_to_scans: /path/to/scans 10 | 11 | # Sequence number (in case the dataset contains a number of sequences, e.g. KITTI Dataset) 12 | sequence: 5 13 | 14 | Height_Map: 15 | resolution: [0.01, 0.03, 0.05, 0.07, 0.09] # List of resolutions using which ground is required to be segmented 16 | backward_range: -50.0 # Maximum -ve x_value for which points are to be considered. 17 | fwd_range: 50.0 # Maximum +ve x_value for which points are to be considered. 18 | left_range: 50.0 # Maximum +ve y_value for which points are to be considered. 19 | right_range: -50.0 # Maximum -ve y_value for which points are to be considered. 20 | top: 1.5 # Maximum +ve z_value for which points are to be considered. 21 | bottom: -2.0 # Maximum -ve z_value for which points are to be considered.
22 | 23 | Octomap: 24 | ground_removal: true # Boolean for application of ground removal 25 | height_filter: false # Boolean for application of a height filter 26 | 27 | nearest_neighbors: 5 # Number of nearest neighbors to be considered while performing k-NN 28 | resolution: 0.1 # Resolution on which the OctoMap will be running 29 | 30 | Results: 31 | # Storage options 32 | store_individual_label: true 33 | store_pcd: true 34 | -------------------------------------------------------------------------------- /evaluation.py: -------------------------------------------------------------------------------- 1 | import open3d as o3d 2 | import numpy as np 3 | from tqdm import tqdm 4 | import utils 5 | 6 | 7 | def get_static_dynamic_idx(gt_map): 8 | colors = np.asarray(gt_map.colors) 9 | static_idx = np.where(colors[:, 0] == 1.0)[0] 10 | dynamic_idx = np.where(colors[:, 2] == 1.0)[0] 11 | 12 | return static_idx, dynamic_idx 13 | 14 | 15 | def eval(static_idx: np.ndarray, 16 | dynamic_idx: np.ndarray, 17 | start: int, 18 | end: int, 19 | poses: np.ndarray, 20 | path_to_scan: str, 21 | path_to_gt: str, 22 | ): 23 | """ 24 | Evaluates predicted static/dynamic indices against the ground-truth labels for a batch of scans 25 | 26 | Args: 27 | static_idx (np.ndarray): List of indices corresponding to the static points in Point cloud 28 | dynamic_idx (np.ndarray): List of indices corresponding to the dynamic points in Point cloud 29 | start (int): Starting index of batch 30 | end (int): Ending index of batch 31 | poses (List: nx4x4): Poses for the LiDAR scans 32 | path_to_scan (str): Path to the scans 33 | path_to_gt (str): Path to ground truth labels 34 | 35 | Returns: 36 | ts: Total number of correctly identified static points 37 | td: Total number of correctly identified dynamic points 38 | total_static: Total number of static points in ground truth 39 | total_dynamic: Total number of dynamic points in ground truth 40 | 41 | """ 42 | gt_map = o3d.geometry.PointCloud() 43 | 44 | for scan_no in tqdm(range(start, end)): 45 | pcd = utils.convert_kitti_bin_to_pcd(f"{path_to_scan}{str(scan_no).zfill(6)}.bin") 46 | 47 | # Loading ground truth labels 48 | gt_label_file = f"{path_to_gt}{str(scan_no).zfill(6)}.label" 49 | gt_label = utils.load_label(gt_label_file) 50 | 51 | gt_label = gt_label & 0xFFFF 52 | mask = gt_label > 200 53 | gt_label[mask] = 251 54 | gt_label[~mask] = 9 55 | 56 | colors = np.full((len(np.asarray(pcd.points)), 3), [1.0, 0.0, 0.0]) 57 | colors[gt_label == 251] = [0.0, 0.0, 1.0] 58 | 59 | pcd.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 60 | 61 | # Creating ground truth map 62 | gt_map = gt_map + pcd.transform(poses[scan_no]) 63 | 64 | gt_static_idx, gt_dynamic_idx = get_static_dynamic_idx(gt_map) 65 | 66 | max_dis = 100000 67 | 68 | # Downsampling created ground truth map 69 | data = gt_map.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 70 | [max_dis, max_dis, max_dis]) 71 | down_idx = data[1] 72 | 73 | # Calculating result statistics 74 | down_idx = down_idx[down_idx > 0] 75 | gt_static_idx_ds = list(set(down_idx).intersection(gt_static_idx)) 76 | gt_dynamic_idx_ds = list(set(down_idx).intersection(gt_dynamic_idx)) 77 | 78 | true_static = list(set(gt_static_idx_ds) & set(list(static_idx))) 79 | true_dynamic = list(set(gt_dynamic_idx_ds) & set(list(dynamic_idx))) 80 | 81 | total_static = len(gt_static_idx_ds) 82 | total_dynamic = len(gt_dynamic_idx_ds) 83 | 84 | ts, td = len(true_static), len(true_dynamic) 85 | 86 | return ts, td, total_static, total_dynamic 87 |
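A minimal usage sketch for the evaluation entry point above; the paths and index arrays are placeholders, and utils.load_poses / utils.load_calib are the helpers used by the octomap_*.py scripts further down:

import numpy as np
import utils
import evaluation

# Hypothetical KITTI-style paths -- adjust to match config.yml.
path_to_scans = "/path/to/scans/"
path_to_gt = "/path/to/gt/"

# Convert camera-frame poses to the LiDAR frame, as done in the octomap_*.py scripts.
poses = utils.load_poses("/path/to/poses")
T_cam_velo = np.asarray(utils.load_calib("/path/to/calib")).reshape((4, 4))
T_velo_cam = np.linalg.inv(T_cam_velo)
inv_frame0 = np.linalg.inv(poses[0])
poses = np.array([T_velo_cam.dot(inv_frame0).dot(p).dot(T_cam_velo) for p in poses])

# Placeholder predictions; in the pipeline these indices come from the OctoMap labeling below.
static_idx = np.array([0, 1, 2])
dynamic_idx = np.array([3, 4])

ts, td, total_static, total_dynamic = evaluation.eval(static_idx=static_idx,
                                                      dynamic_idx=dynamic_idx,
                                                      start=0,
                                                      end=10,
                                                      poses=poses,
                                                      path_to_scan=path_to_scans,
                                                      path_to_gt=path_to_gt)
print("static recall:", ts / (total_static + 1e-8), "dynamic recall:", td / (total_dynamic + 1e-8))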
-------------------------------------------------------------------------------- /octomap_empty.py: -------------------------------------------------------------------------------- 1 | import octomap 2 | import numpy as np 3 | import open3d as o3d 4 | 5 | import yaml 6 | import json 7 | from tqdm import tqdm 8 | 9 | import utils 10 | from range_image_map import segment_ground_multi_res 11 | import evaluation 12 | from batch_generation import get_batches 13 | 14 | 15 | def get_scan_wise_labels(octree: octomap.OcTree, 16 | scan: o3d.geometry.PointCloud(), 17 | scan_no: int, 18 | nearest_neighbors: int, 19 | seq: int, 20 | ground_removal: bool): 21 | """ 22 | Provides final ground points calculated using certain 23 | resolution and after application of height filter 24 | 25 | Args: 26 | octree (octomap.OcTree): Octree created while generating static and dynamic map 27 | scan (o3d.geometry.PointCloud()): Point Cloud for which labels are required 28 | scan_no(int): Scan number for the particular scan 29 | nearest_neighbors(int): Number of nearest neighbors to be searched for labeling of unknown points 30 | seq(int): Sequence to which scans belong (Parameter to be used for KITTI Dataset) 31 | ground_removal(bool): True -> To apply ground removal 32 | False -> For results without ground removal 33 | """ 34 | 35 | # Extracting labels from OcTree for input scan 36 | points = np.asarray(scan.points) 37 | labels = octree.getLabels(points) 38 | 39 | empty_idx, occupied_idx, unknown_idx = [], [], [] 40 | 41 | for i in range(len(labels)): 42 | if labels[i] == 0: 43 | empty_idx.append(i) 44 | elif labels[i] == 1: 45 | occupied_idx.append(i) 46 | else: 47 | unknown_idx.append(i) 48 | 49 | colors = np.full((len(points), 3), [1.0, 0.0, 0.0]) 50 | colors[empty_idx] = [0.0, 0.0, 1.0] 51 | 52 | scan.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 53 | 54 | known_idx = np.concatenate((occupied_idx, empty_idx), axis=None) 55 | pcd_known = scan.select_by_index(known_idx) 56 | 57 | pred_tree = o3d.geometry.KDTreeFlann(pcd_known) 58 | color = np.asarray(pcd_known.colors) 59 | 60 | static_idx, dynamic_idx = [], [] 61 | 62 | # Assigning labels to unknown labels 63 | for pt in unknown_idx: 64 | [_, idx, _] = pred_tree.search_knn_vector_3d(points[pt], nearest_neighbors) 65 | 66 | final_score = np.mean(color[idx, 0]) 67 | 68 | if final_score > 0.5: 69 | static_idx.append(pt) 70 | else: 71 | dynamic_idx.append(pt) 72 | 73 | static_idx = np.concatenate((occupied_idx, static_idx), axis=None) 74 | dynamic_idx = np.concatenate((empty_idx, dynamic_idx), axis=None) 75 | static_idx = static_idx.astype(np.int32) 76 | dynamic_idx = dynamic_idx.astype(np.int32) 77 | labels = np.full((len(static_idx) + len(dynamic_idx),), 9) 78 | labels[dynamic_idx] = 251 79 | 80 | # Storing labels for input scan 81 | if ground_removal: 82 | file_name = f"./labels/{seq}/{str(scan_no).zfill(6)}" 83 | else: 84 | file_name = f"./labels/{seq}/orig/{str(scan_no).zfill(6)}" 85 | labels.reshape((-1)).astype(np.int32) 86 | labels.tofile(file_name + '.label') 87 | 88 | 89 | def main(): 90 | # Loading Parameters 91 | with open('config.yml') as f: 92 | config = yaml.safe_load(f) 93 | 94 | # Initializing Parameters 95 | seq = config['Dataset']['sequence'] 96 | batch_size = config['Dataset']['batch_size'] 97 | pose_file = config['Dataset']['path_to_poses'] 98 | calib_file = config['Dataset']['path_to_calibration'] 99 | path_to_scans = config['Dataset']['path_to_scans'] 100 | path_to_gt = config['Dataset']['path_to_gt'] 101 | 102 | nearest_neighbors = 
config['Octomap']['nearest_neighbors'] 103 | resolution = config['Octomap']['resolution'] 104 | ground_removal = config['Octomap']['ground_removal'] 105 | height_filter = config['Octomap']['height_filter'] 106 | height_threshold = 0.5 107 | octree = octomap.OcTree(resolution) 108 | 109 | hm_resolution = config['Height_Map']['resolution'] 110 | fwd_range = (config['Height_Map']['backward_range'], config['Height_Map']['fwd_range']) 111 | side_range = (config['Height_Map']['right_range'], config['Height_Map']['left_range']) 112 | height_range = (config['Height_Map']['bottom'], config['Height_Map']['top']) 113 | 114 | store_pcd = config['Results']['store_pcd'] 115 | store_individual_label = config['Results']['store_individual_label'] 116 | 117 | poses = utils.load_poses(pose_file) 118 | inv_frame0 = np.linalg.inv(poses[0]) 119 | 120 | # load calibrations 121 | T_cam_velo = utils.load_calib(calib_file) 122 | T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4)) 123 | T_velo_cam = np.linalg.inv(T_cam_velo) 124 | 125 | # convert kitti poses from camera coord to LiDAR coord 126 | new_poses = [] 127 | for pose in poses: 128 | new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo)) 129 | poses = np.array(new_poses) 130 | 131 | # creating batches for the sequence 132 | batches, max_dis = get_batches(poses, batch_size) 133 | print(len(batches), "batches created") 134 | 135 | for batch in batches: 136 | octree.clear() 137 | octree = octomap.OcTree(resolution) 138 | start, end = batch[0], batch[1] 139 | pcds = {} 140 | final_map = o3d.geometry.PointCloud() 141 | start_idx, end_idx = start, end 142 | 143 | print(f"Running OctoMap from {batch[0]} till {batch[1]}") 144 | for scan_no in tqdm(range(start_idx, end_idx)): 145 | 146 | pcd = utils.convert_kitti_bin_to_pcd(f"{path_to_scans}{str(scan_no).zfill(6)}.bin") 147 | pcds[scan_no] = pcd 148 | 149 | # Applying ground segmentation on Point Cloud 150 | ground_pcd, non_ground_pcd, ground_indices, non_ground_indices = \ 151 | segment_ground_multi_res(pcd=pcd, 152 | res=hm_resolution, 153 | fwd_range=fwd_range, 154 | side_range=side_range, 155 | height_range=height_range, 156 | height_threshold=-1.1) 157 | 158 | points = np.asarray(pcd.points) 159 | ground_pcd = ground_pcd.transform(poses[scan_no]) 160 | ground_points = np.asarray(ground_pcd.points) 161 | 162 | ht_filter = np.where(points[:, 2] > height_threshold)[0] 163 | ht_points = points[ht_filter] 164 | 165 | final_map = final_map + pcd.transform(poses[scan_no]) 166 | 167 | pose = poses[scan_no] 168 | 169 | # Inserting Point Cloud into OcTree 170 | octree.insertPointCloud( 171 | pointcloud=points, 172 | origin=np.array([pose[0][3], pose[1][3], pose[2][3]], dtype=float), 173 | maxrange=80.0, 174 | ) 175 | 176 | # Setting ground points as static in the Occupancy grid 177 | if ground_removal: 178 | for pt in range(len(ground_points)): 179 | try: 180 | key = octree.coordToKey(ground_points[pt]) 181 | node = octree.search(key) 182 | node.setValue(200.0) 183 | except Exception as e: 184 | print(e) 185 | 186 | # Setting points above a certain height as static in the Occupancy grid 187 | if height_filter: 188 | for pt in range(len(ht_points)): 189 | try: 190 | key = octree.coordToKey(ht_points[pt]) 191 | node = octree.search(key) 192 | node.setValue(200.0) 193 | except Exception as e: 194 | print(e) 195 | 196 | octree.updateInnerOccupancy() 197 | 198 | # Extracting labels from OcTree 199 | final_points = np.asarray(final_map.points) 200 | labels = octree.getLabels(final_points) 201 | 202 | 
occupied_idx, empty_idx, unknown_idx = [], [], [] 203 | for i in range(len(labels)): 204 | if labels[i] == 1: 205 | occupied_idx.append(i) 206 | else: 207 | empty_idx.append(i) 208 | 209 | # pcd_static = final_map.select_by_index(occupied_idx) 210 | # pcd_dynamic = final_map.select_by_index(empty_idx) 211 | 212 | # color_static = np.full((len(np.asarray(pcd_static.points)), 3), [1.0, 0.0, 0.0]) 213 | # color_dynamic = np.full((len(np.asarray(pcd_dynamic.points)), 3), [0.0, 0.0, 1.0]) 214 | 215 | # pcd_static.colors = o3d.utility.Vector3dVector(np.asarray(color_static)) 216 | # pcd_dynamic.colors = o3d.utility.Vector3dVector(np.asarray(color_dynamic)) 217 | 218 | # pcd = pcd_static + pcd_dynamic 219 | # pred_tree = o3d.geometry.KDTreeFlann(pcd) 220 | # color = np.asarray(pcd.colors) 221 | 222 | # # Assigning labels to unknown labels 223 | # for pt in unknown_idx: 224 | # [_, idx, _] = pred_tree.search_knn_vector_3d(final_points[pt], nearest_neighbors) 225 | # final_score = np.mean(color[idx, 0]) 226 | 227 | # if final_score > 0.5: 228 | # occupied_idx.append(pt) 229 | # else: 230 | # empty_idx.append(pt) 231 | 232 | pcd_static = final_map.select_by_index(occupied_idx) 233 | pcd_dynamic = final_map.select_by_index(empty_idx) 234 | 235 | # Downsampling static and dynamic point cloud 236 | data = pcd_static.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 237 | [max_dis, max_dis, max_dis]) 238 | pcd_static = data[0] 239 | data = pcd_dynamic.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 240 | [max_dis, max_dis, max_dis]) 241 | 242 | pcd_dynamic = data[0] 243 | data = final_map.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 244 | [max_dis, max_dis, max_dis]) 245 | down_idx = data[1] 246 | down_idx = down_idx[down_idx > 0] 247 | 248 | # Calculating static and dynamic indices for original map 249 | static_idx = list(set(occupied_idx) & set(down_idx)) 250 | dynamic_idx = list(set(empty_idx) & set(down_idx)) 251 | 252 | # Performing evaluation 253 | print("performing eval ...") 254 | ts, td, total_static, total_dynamic = evaluation.eval(static_idx=static_idx, 255 | dynamic_idx=dynamic_idx, 256 | start=start, 257 | end=end, 258 | poses=poses, 259 | path_to_scan=path_to_scans, 260 | path_to_gt=path_to_gt) 261 | print("TS", ts, "Total Static", total_static) 262 | print("TD", td, "Total Dynamic", total_dynamic) 263 | 264 | key = str(start) + "-" + str(end) 265 | res = {key: {"TS": ts, "TD": td, "Total Voxels": total_static, "Total Dynamic Points": total_dynamic, 266 | "Accuracy": (ts + td) / (total_dynamic + total_static + 1e-8), 267 | "Accuracy_1": (ts / (2 * total_static + 1e-8)) + (td / (2 * total_dynamic + 1e-8)), 268 | "Recall": td / (total_dynamic + 1e-8)}} 269 | 270 | # Storing results 271 | json_data = json.dumps(res) 272 | if ground_removal: 273 | f = open(f'./json/{seq}.json', 'a+') 274 | f.write(json_data + "\n") 275 | f.close() 276 | else: 277 | f = open(f'./json/{seq}_orig.json', 'a+') 278 | f.write(json_data + "\n") 279 | f.close() 280 | 281 | # Storing static and dynamic point cloud for individual scans 282 | if store_pcd: 283 | o3d.io.write_point_cloud(f"./pcd/{seq}/static/{int(start)}-{int(end)}.pcd", pcd_static) 284 | o3d.io.write_point_cloud(f"./pcd/{seq}/dynamic/{int(start)}-{int(end)}.pcd", pcd_dynamic) 285 | 286 | # Storing labels for individual scans 287 | if store_individual_label: 288 | print("Storing Scan Wise Labels") 289 | for scan_no in tqdm(range(start, end)): 290 | pcd = pcds[scan_no] 
291 | get_scan_wise_labels(octree, pcd, scan_no, nearest_neighbors, seq, ground_removal) 292 | 293 | 294 | if __name__ == '__main__': 295 | main() 296 | -------------------------------------------------------------------------------- /octomap_gr.py: -------------------------------------------------------------------------------- 1 | import octomap 2 | import numpy as np 3 | import open3d as o3d 4 | 5 | import yaml 6 | import json 7 | from tqdm import tqdm 8 | 9 | import utils 10 | from range_image_map import segment_ground_multi_res 11 | import evaluation 12 | from batch_generation import get_batches 13 | 14 | 15 | def get_scan_wise_labels(octree: octomap.OcTree, 16 | scan: o3d.geometry.PointCloud(), 17 | scan_no: int, 18 | nearest_neighbors: int, 19 | seq: int, 20 | ground_removal: bool): 21 | """ 22 | Provides final ground points calculated using certain 23 | resolution and after application of height filter 24 | 25 | Args: 26 | octree (octomap.OcTree): Octree created while generating static and dynamic map 27 | scan (o3d.geometry.PointCloud()): Point Cloud for which labels are required 28 | scan_no(int): Scan number for the particular scan 29 | nearest_neighbors(int): Number of nearest neighbors to be searched for labeling of unknown points 30 | seq(int): Sequence to which scans belong (Parameter to be used for KITTI Dataset) 31 | ground_removal(bool): True -> To apply ground removal 32 | False -> For results without ground removal 33 | """ 34 | 35 | # Extracting labels from OcTree for input scan 36 | points = np.asarray(scan.points) 37 | labels = octree.getLabels(points) 38 | 39 | empty_idx, occupied_idx, unknown_idx = [], [], [] 40 | 41 | for i in range(len(labels)): 42 | if labels[i] == 0: 43 | empty_idx.append(i) 44 | elif labels[i] == 1: 45 | occupied_idx.append(i) 46 | else: 47 | unknown_idx.append(i) 48 | 49 | colors = np.full((len(points), 3), [1.0, 0.0, 0.0]) 50 | colors[empty_idx] = [0.0, 0.0, 1.0] 51 | 52 | scan.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 53 | 54 | known_idx = np.concatenate((occupied_idx, empty_idx), axis=None) 55 | pcd_known = scan.select_by_index(known_idx) 56 | 57 | pred_tree = o3d.geometry.KDTreeFlann(pcd_known) 58 | color = np.asarray(pcd_known.colors) 59 | 60 | static_idx, dynamic_idx = [], [] 61 | 62 | # Assigning labels to unknown labels 63 | for pt in unknown_idx: 64 | [_, idx, _] = pred_tree.search_knn_vector_3d(points[pt], nearest_neighbors) 65 | 66 | final_score = np.mean(color[idx, 0]) 67 | 68 | if final_score > 0.5: 69 | static_idx.append(pt) 70 | else: 71 | dynamic_idx.append(pt) 72 | 73 | static_idx = np.concatenate((occupied_idx, static_idx), axis=None) 74 | dynamic_idx = np.concatenate((empty_idx, dynamic_idx), axis=None) 75 | static_idx = static_idx.astype(np.int32) 76 | dynamic_idx = dynamic_idx.astype(np.int32) 77 | labels = np.full((len(static_idx) + len(dynamic_idx),), 9) 78 | labels[dynamic_idx] = 251 79 | 80 | # Storing labels for input scan 81 | if ground_removal: 82 | file_name = f"./labels/{seq}/{str(scan_no).zfill(6)}" 83 | else: 84 | file_name = f"./labels/{seq}/orig/{str(scan_no).zfill(6)}" 85 | labels.reshape((-1)).astype(np.int32) 86 | labels.tofile(file_name + '.label') 87 | 88 | 89 | def main(): 90 | # Loading Parameters 91 | with open('config.yml') as f: 92 | config = yaml.safe_load(f) 93 | 94 | # Initializing Parameters 95 | seq = config['Dataset']['sequence'] 96 | batch_size = config['Dataset']['batch_size'] 97 | pose_file = config['Dataset']['path_to_poses'] 98 | calib_file = 
config['Dataset']['path_to_calibration'] 99 | path_to_scans = config['Dataset']['path_to_scans'] 100 | path_to_gt = config['Dataset']['path_to_gt'] 101 | 102 | nearest_neighbors = config['Octomap']['nearest_neighbors'] 103 | resolution = config['Octomap']['resolution'] 104 | ground_removal = config['Octomap']['ground_removal'] 105 | height_filter = config['Octomap']['height_filter'] 106 | height_threshold = 0.5 107 | octree = octomap.OcTree(resolution) 108 | 109 | hm_resolution = config['Height_Map']['resolution'] 110 | fwd_range = (config['Height_Map']['backward_range'], config['Height_Map']['fwd_range']) 111 | side_range = (config['Height_Map']['right_range'], config['Height_Map']['left_range']) 112 | height_range = (config['Height_Map']['bottom'], config['Height_Map']['top']) 113 | 114 | store_pcd = config['Results']['store_pcd'] 115 | store_individual_label = config['Results']['store_individual_label'] 116 | 117 | poses = utils.load_poses(pose_file) 118 | inv_frame0 = np.linalg.inv(poses[0]) 119 | 120 | # load calibrations 121 | T_cam_velo = utils.load_calib(calib_file) 122 | T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4)) 123 | T_velo_cam = np.linalg.inv(T_cam_velo) 124 | 125 | # convert kitti poses from camera coord to LiDAR coord 126 | new_poses = [] 127 | for pose in poses: 128 | new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo)) 129 | poses = np.array(new_poses) 130 | 131 | # creating batches for the sequence 132 | batches, max_dis = get_batches(poses, batch_size) 133 | print(len(batches), "batches created") 134 | 135 | for batch in batches: 136 | octree.clear() 137 | octree = octomap.OcTree(resolution) 138 | start, end = batch[0], batch[1] 139 | pcds = {} 140 | final_map = o3d.geometry.PointCloud() 141 | start_idx, end_idx = start, end 142 | 143 | print(f"Running OctoMap from {batch[0]} till {batch[1]}") 144 | for scan_no in tqdm(range(start_idx, end_idx)): 145 | 146 | pcd = utils.convert_kitti_bin_to_pcd(f"{path_to_scans}{str(scan_no).zfill(6)}.bin") 147 | pcds[scan_no] = pcd 148 | 149 | # Applying ground segmentation on Point Cloud 150 | ground_pcd, non_ground_pcd, ground_indices, non_ground_indices = \ 151 | segment_ground_multi_res(pcd=pcd, 152 | res=hm_resolution, 153 | fwd_range=fwd_range, 154 | side_range=side_range, 155 | height_range=height_range, 156 | height_threshold=-1.1) 157 | 158 | points = np.asarray(pcd.points) 159 | ground_pcd = ground_pcd.transform(poses[scan_no]) 160 | ground_points = np.asarray(ground_pcd.points) 161 | 162 | ht_filter = np.where(points[:, 2] > height_threshold)[0] 163 | ht_points = points[ht_filter] 164 | 165 | final_map = final_map + pcd.transform(poses[scan_no]) 166 | 167 | pose = poses[scan_no] 168 | 169 | # Inserting Point Cloud into OcTree 170 | octree.insertPointCloud( 171 | pointcloud=points, 172 | origin=np.array([pose[0][3], pose[1][3], pose[2][3]], dtype=float), 173 | maxrange=-1, 174 | ) 175 | 176 | # Setting ground points as static in the Occupancy grid 177 | if ground_removal: 178 | for pt in range(len(ground_points)): 179 | try: 180 | key = octree.coordToKey(ground_points[pt]) 181 | node = octree.search(key) 182 | node.setValue(200.0) 183 | except Exception as e: 184 | print(e) 185 | 186 | # Setting points above a certain height as static in the Occupancy grid 187 | if height_filter: 188 | for pt in range(len(ht_points)): 189 | try: 190 | key = octree.coordToKey(ht_points[pt]) 191 | node = octree.search(key) 192 | node.setValue(200.0) 193 | except Exception as e: 194 | print(e) 195 | 196 | 
octree.updateInnerOccupancy() 197 | 198 | # Extracting labels from OcTree 199 | final_points = np.asarray(final_map.points) 200 | labels = octree.getLabels(final_points) 201 | 202 | occupied_idx, empty_idx, unknown_idx = [], [], [] 203 | for i in range(len(labels)): 204 | if labels[i] == 1: 205 | occupied_idx.append(i) 206 | elif labels[i] == 0: 207 | empty_idx.append(i) 208 | else: 209 | unknown_idx.append(i) 210 | 211 | pcd_static = final_map.select_by_index(occupied_idx) 212 | pcd_dynamic = final_map.select_by_index(empty_idx) 213 | 214 | color_static = np.full((len(np.asarray(pcd_static.points)), 3), [1.0, 0.0, 0.0]) 215 | color_dynamic = np.full((len(np.asarray(pcd_dynamic.points)), 3), [0.0, 0.0, 1.0]) 216 | 217 | pcd_static.colors = o3d.utility.Vector3dVector(np.asarray(color_static)) 218 | pcd_dynamic.colors = o3d.utility.Vector3dVector(np.asarray(color_dynamic)) 219 | 220 | pcd = pcd_static + pcd_dynamic 221 | pred_tree = o3d.geometry.KDTreeFlann(pcd) 222 | color = np.asarray(pcd.colors) 223 | 224 | # Assigning labels to unknown labels 225 | for pt in unknown_idx: 226 | [_, idx, _] = pred_tree.search_knn_vector_3d(final_points[pt], nearest_neighbors) 227 | final_score = np.mean(color[idx, 0]) 228 | 229 | if final_score > 0.5: 230 | occupied_idx.append(pt) 231 | else: 232 | empty_idx.append(pt) 233 | 234 | pcd_static = final_map.select_by_index(occupied_idx) 235 | pcd_dynamic = final_map.select_by_index(empty_idx) 236 | 237 | # Downsampling static and dynamic point cloud 238 | data = pcd_static.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 239 | [max_dis, max_dis, max_dis]) 240 | pcd_static = data[0] 241 | data = pcd_dynamic.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 242 | [max_dis, max_dis, max_dis]) 243 | 244 | pcd_dynamic = data[0] 245 | data = final_map.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 246 | [max_dis, max_dis, max_dis]) 247 | down_idx = data[1] 248 | down_idx = down_idx[down_idx > 0] 249 | 250 | # Calculating static and dynamic indices for original map 251 | static_idx = list(set(occupied_idx) & set(down_idx)) 252 | dynamic_idx = list(set(empty_idx) & set(down_idx)) 253 | 254 | # Performing evaluation 255 | print("performing eval ...") 256 | ts, td, total_static, total_dynamic = evaluation.eval(static_idx=static_idx, 257 | dynamic_idx=dynamic_idx, 258 | start=start, 259 | end=end, 260 | poses=poses, 261 | path_to_scan=path_to_scans, 262 | path_to_gt=path_to_gt) 263 | print("TS", ts, "Total Static", total_static) 264 | print("TD", td, "Total Dynamic", total_dynamic) 265 | 266 | key = str(start) + "-" + str(end) 267 | res = {key: {"TS": ts, "TD": td, "Total Voxels": total_static, "Total Dynamic Points": total_dynamic, 268 | "Accuracy": (ts + td) / (total_dynamic + total_static + 1e-8), 269 | "Accuracy_1": (ts / (2 * total_static + 1e-8)) + (td / (2 * total_dynamic + 1e-8)), 270 | "Recall": td / (total_dynamic + 1e-8)}} 271 | 272 | # Storing results 273 | json_data = json.dumps(res) 274 | if ground_removal: 275 | f = open(f'./json/{seq}.json', 'a+') 276 | f.write(json_data + "\n") 277 | f.close() 278 | else: 279 | f = open(f'./json/{seq}_orig.json', 'a+') 280 | f.write(json_data + "\n") 281 | f.close() 282 | 283 | # Storing static and dynamic point cloud for individual scans 284 | if store_pcd: 285 | o3d.io.write_point_cloud(f"./pcd/{seq}/static/{int(start)}-{int(end)}.pcd", pcd_static) 286 | o3d.io.write_point_cloud(f"./pcd/{seq}/dynamic/{int(start)}-{int(end)}.pcd", 
pcd_dynamic) 287 | 288 | # Storing labels for individual scans 289 | if store_individual_label: 290 | print("Storing Scan Wise Labels") 291 | for scan_no in tqdm(range(start, end)): 292 | pcd = pcds[scan_no] 293 | get_scan_wise_labels(octree, pcd, scan_no, nearest_neighbors, seq, ground_removal) 294 | 295 | 296 | if __name__ == '__main__': 297 | main() 298 | -------------------------------------------------------------------------------- /octomap_knn.py: -------------------------------------------------------------------------------- 1 | import octomap 2 | import numpy as np 3 | import open3d as o3d 4 | 5 | import yaml 6 | import json 7 | from tqdm import tqdm 8 | 9 | import utils 10 | from range_image_map import segment_ground_multi_res 11 | import evaluation 12 | from batch_generation import get_batches 13 | 14 | 15 | def get_scan_wise_labels(octree: octomap.OcTree, 16 | scan: o3d.geometry.PointCloud(), 17 | scan_no: int, 18 | nearest_neighbors: int, 19 | seq: int, 20 | ground_removal: bool): 21 | """ 22 | Provides final ground points calculated using certain 23 | resolution and after application of height filter 24 | 25 | Args: 26 | octree (octomap.OcTree): Octree created while generating static and dynamic map 27 | scan (o3d.geometry.PointCloud()): Point Cloud for which labels are required 28 | scan_no(int): Scan number for the particular scan 29 | nearest_neighbors(int): Number of nearest neighbors to be searched for labeling of unknown points 30 | seq(int): Sequence to which scans belong (Parameter to be used for KITTI Dataset) 31 | ground_removal(bool): True -> To apply ground removal 32 | False -> For results without ground removal 33 | """ 34 | 35 | # Extracting labels from OcTree for input scan 36 | points = np.asarray(scan.points) 37 | labels = octree.getLabels(points) 38 | 39 | empty_idx, occupied_idx, unknown_idx = [], [], [] 40 | 41 | for i in range(len(labels)): 42 | if labels[i] == 0: 43 | empty_idx.append(i) 44 | elif labels[i] == 1: 45 | occupied_idx.append(i) 46 | else: 47 | unknown_idx.append(i) 48 | 49 | colors = np.full((len(points), 3), [1.0, 0.0, 0.0]) 50 | colors[empty_idx] = [0.0, 0.0, 1.0] 51 | 52 | scan.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 53 | 54 | known_idx = np.concatenate((occupied_idx, empty_idx), axis=None) 55 | pcd_known = scan.select_by_index(known_idx) 56 | 57 | pred_tree = o3d.geometry.KDTreeFlann(pcd_known) 58 | color = np.asarray(pcd_known.colors) 59 | 60 | static_idx, dynamic_idx = [], [] 61 | 62 | # Assigning labels to unknown labels 63 | for pt in unknown_idx: 64 | [_, idx, _] = pred_tree.search_knn_vector_3d(points[pt], nearest_neighbors) 65 | 66 | final_score = np.mean(color[idx, 0]) 67 | 68 | if final_score > 0.5: 69 | static_idx.append(pt) 70 | else: 71 | dynamic_idx.append(pt) 72 | 73 | static_idx = np.concatenate((occupied_idx, static_idx), axis=None) 74 | dynamic_idx = np.concatenate((empty_idx, dynamic_idx), axis=None) 75 | static_idx = static_idx.astype(np.int32) 76 | dynamic_idx = dynamic_idx.astype(np.int32) 77 | labels = np.full((len(static_idx) + len(dynamic_idx),), 9) 78 | labels[dynamic_idx] = 251 79 | 80 | # Storing labels for input scan 81 | if ground_removal: 82 | file_name = f"./labels/{seq}/{str(scan_no).zfill(6)}" 83 | else: 84 | file_name = f"./labels/{seq}/orig/{str(scan_no).zfill(6)}" 85 | labels.reshape((-1)).astype(np.int32) 86 | labels.tofile(file_name + '.label') 87 | 88 | 89 | def main(): 90 | # Loading Parameters 91 | with open('config.yml') as f: 92 | config = yaml.safe_load(f) 93 | 94 | # 
Initializing Parameters 95 | seq = config['Dataset']['sequence'] 96 | batch_size = config['Dataset']['batch_size'] 97 | pose_file = config['Dataset']['path_to_poses'] 98 | calib_file = config['Dataset']['path_to_calibration'] 99 | path_to_scans = config['Dataset']['path_to_scans'] 100 | path_to_gt = config['Dataset']['path_to_gt'] 101 | 102 | nearest_neighbors = config['Octomap']['nearest_neighbors'] 103 | resolution = config['Octomap']['resolution'] 104 | ground_removal = config['Octomap']['ground_removal'] 105 | height_filter = config['Octomap']['height_filter'] 106 | height_threshold = 0.5 107 | octree = octomap.OcTree(resolution) 108 | 109 | hm_resolution = config['Height_Map']['resolution'] 110 | fwd_range = (config['Height_Map']['backward_range'], config['Height_Map']['fwd_range']) 111 | side_range = (config['Height_Map']['right_range'], config['Height_Map']['left_range']) 112 | height_range = (config['Height_Map']['bottom'], config['Height_Map']['top']) 113 | 114 | store_pcd = config['Results']['store_pcd'] 115 | store_individual_label = config['Results']['store_individual_label'] 116 | 117 | poses = utils.load_poses(pose_file) 118 | inv_frame0 = np.linalg.inv(poses[0]) 119 | 120 | # load calibrations 121 | T_cam_velo = utils.load_calib(calib_file) 122 | T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4)) 123 | T_velo_cam = np.linalg.inv(T_cam_velo) 124 | 125 | # convert kitti poses from camera coord to LiDAR coord 126 | new_poses = [] 127 | for pose in poses: 128 | new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo)) 129 | poses = np.array(new_poses) 130 | 131 | # creating batches for the sequence 132 | batches, max_dis = get_batches(poses, batch_size) 133 | print(len(batches), "batches created") 134 | 135 | for batch in batches: 136 | octree.clear() 137 | octree = octomap.OcTree(resolution) 138 | start, end = batch[0], batch[1] 139 | pcds = {} 140 | final_map = o3d.geometry.PointCloud() 141 | start_idx, end_idx = start, end 142 | 143 | print(f"Running OctoMap from {batch[0]} till {batch[1]}") 144 | for scan_no in tqdm(range(start_idx, end_idx)): 145 | 146 | pcd = utils.convert_kitti_bin_to_pcd(f"{path_to_scans}{str(scan_no).zfill(6)}.bin") 147 | pcds[scan_no] = pcd 148 | 149 | # Applying ground segmentation on Point Cloud 150 | ground_pcd, non_ground_pcd, ground_indices, non_ground_indices = \ 151 | segment_ground_multi_res(pcd=pcd, 152 | res=hm_resolution, 153 | fwd_range=fwd_range, 154 | side_range=side_range, 155 | height_range=height_range, 156 | height_threshold=-1.1) 157 | 158 | points = np.asarray(pcd.points) 159 | ground_pcd = ground_pcd.transform(poses[scan_no]) 160 | ground_points = np.asarray(ground_pcd.points) 161 | 162 | ht_filter = np.where(points[:, 2] > height_threshold)[0] 163 | ht_points = points[ht_filter] 164 | 165 | final_map = final_map + pcd.transform(poses[scan_no]) 166 | 167 | pose = poses[scan_no] 168 | 169 | # Inserting Point Cloud into OcTree 170 | octree.insertPointCloud( 171 | pointcloud=points, 172 | origin=np.array([pose[0][3], pose[1][3], pose[2][3]], dtype=float), 173 | maxrange=80.0, 174 | ) 175 | 176 | # Setting ground points as static in the Occupancy grid 177 | if ground_removal: 178 | for pt in range(len(ground_points)): 179 | try: 180 | key = octree.coordToKey(ground_points[pt]) 181 | node = octree.search(key) 182 | node.setValue(200.0) 183 | except Exception as e: 184 | print(e) 185 | 186 | # Setting points above a certain height as static in the Occupancy grid 187 | if height_filter: 188 | for pt in range(len(ht_points)): 
189 | try: 190 | key = octree.coordToKey(ht_points[pt]) 191 | node = octree.search(key) 192 | node.setValue(200.0) 193 | except Exception as e: 194 | print(e) 195 | 196 | octree.updateInnerOccupancy() 197 | 198 | # Extracting labels from OcTree 199 | final_points = np.asarray(final_map.points) 200 | labels = octree.getLabels(final_points) 201 | 202 | occupied_idx, empty_idx, unknown_idx = [], [], [] 203 | for i in range(len(labels)): 204 | if labels[i] == 1: 205 | occupied_idx.append(i) 206 | elif labels[i] == 0: 207 | empty_idx.append(i) 208 | else : 209 | unknown_idx.append(i) 210 | 211 | pcd_static = final_map.select_by_index(occupied_idx) 212 | pcd_dynamic = final_map.select_by_index(empty_idx) 213 | 214 | color_static = np.full((len(np.asarray(pcd_static.points)), 3), [1.0, 0.0, 0.0]) 215 | color_dynamic = np.full((len(np.asarray(pcd_dynamic.points)), 3), [0.0, 0.0, 1.0]) 216 | 217 | pcd_static.colors = o3d.utility.Vector3dVector(np.asarray(color_static)) 218 | pcd_dynamic.colors = o3d.utility.Vector3dVector(np.asarray(color_dynamic)) 219 | 220 | pcd = pcd_static + pcd_dynamic 221 | pred_tree = o3d.geometry.KDTreeFlann(pcd) 222 | color = np.asarray(pcd.colors) 223 | 224 | # Assigning labels to unknown labels 225 | for pt in unknown_idx: 226 | [_, idx, _] = pred_tree.search_knn_vector_3d(final_points[pt], nearest_neighbors) 227 | final_score = np.mean(color[idx, 0]) 228 | 229 | if final_score > 0.5: 230 | occupied_idx.append(pt) 231 | else: 232 | empty_idx.append(pt) 233 | 234 | pcd_static = final_map.select_by_index(occupied_idx) 235 | pcd_dynamic = final_map.select_by_index(empty_idx) 236 | 237 | # Downsampling static and dynamic point cloud 238 | data = pcd_static.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 239 | [max_dis, max_dis, max_dis]) 240 | pcd_static = data[0] 241 | data = pcd_dynamic.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 242 | [max_dis, max_dis, max_dis]) 243 | 244 | pcd_dynamic = data[0] 245 | data = final_map.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 246 | [max_dis, max_dis, max_dis]) 247 | down_idx = data[1] 248 | down_idx = down_idx[down_idx > 0] 249 | 250 | # Calculating static and dynamic indices for original map 251 | static_idx = list(set(occupied_idx) & set(down_idx)) 252 | dynamic_idx = list(set(empty_idx) & set(down_idx)) 253 | 254 | # Performing evaluation 255 | print("performing eval ...") 256 | ts, td, total_static, total_dynamic = evaluation.eval(static_idx=static_idx, 257 | dynamic_idx=dynamic_idx, 258 | start=start, 259 | end=end, 260 | poses=poses, 261 | path_to_scan=path_to_scans, 262 | path_to_gt=path_to_gt) 263 | print("TS", ts, "Total Static", total_static) 264 | print("TD", td, "Total Dynamic", total_dynamic) 265 | 266 | key = str(start) + "-" + str(end) 267 | res = {key: {"TS": ts, "TD": td, "Total Voxels": total_static, "Total Dynamic Points": total_dynamic, 268 | "Accuracy": (ts + td) / (total_dynamic + total_static + 1e-8), 269 | "Accuracy_1": (ts / (2 * total_static + 1e-8)) + (td / (2 * total_dynamic + 1e-8)), 270 | "Recall": td / (total_dynamic + 1e-8)}} 271 | 272 | # Storing results 273 | json_data = json.dumps(res) 274 | if ground_removal: 275 | f = open(f'./json/{seq}.json', 'a+') 276 | f.write(json_data + "\n") 277 | f.close() 278 | else: 279 | f = open(f'./json/{seq}_orig.json', 'a+') 280 | f.write(json_data + "\n") 281 | f.close() 282 | 283 | # Storing static and dynamic point cloud for individual scans 284 | if store_pcd: 285 | 
o3d.io.write_point_cloud(f"./pcd/{seq}/static/{int(start)}-{int(end)}.pcd", pcd_static) 286 | o3d.io.write_point_cloud(f"./pcd/{seq}/dynamic/{int(start)}-{int(end)}.pcd", pcd_dynamic) 287 | 288 | # Storing labels for individual scans 289 | if store_individual_label: 290 | print("Storing Scan Wise Labels") 291 | for scan_no in tqdm(range(start, end)): 292 | pcd = pcds[scan_no] 293 | get_scan_wise_labels(octree, pcd, scan_no, nearest_neighbors, seq, ground_removal) 294 | 295 | 296 | if __name__ == '__main__': 297 | main() 298 | -------------------------------------------------------------------------------- /octomap_occupied.py: -------------------------------------------------------------------------------- 1 | import octomap 2 | import numpy as np 3 | import open3d as o3d 4 | 5 | import yaml 6 | import json 7 | from tqdm import tqdm 8 | 9 | import utils 10 | from range_image_map import segment_ground_multi_res 11 | import evaluation 12 | from batch_generation import get_batches 13 | 14 | 15 | def get_scan_wise_labels(octree: octomap.OcTree, 16 | scan: o3d.geometry.PointCloud(), 17 | scan_no: int, 18 | nearest_neighbors: int, 19 | seq: int, 20 | ground_removal: bool): 21 | """ 22 | Provides final ground points calculated using certain 23 | resolution and after application of height filter 24 | 25 | Args: 26 | octree (octomap.OcTree): Octree created while generating static and dynamic map 27 | scan (o3d.geometry.PointCloud()): Point Cloud for which labels are required 28 | scan_no(int): Scan number for the particular scan 29 | nearest_neighbors(int): Number of nearest neighbors to be searched for labeling of unknown points 30 | seq(int): Sequence to which scans belong (Parameter to be used for KITTI Dataset) 31 | ground_removal(bool): True -> To apply ground removal 32 | False -> For results without ground removal 33 | """ 34 | 35 | # Extracting labels from OcTree for input scan 36 | points = np.asarray(scan.points) 37 | labels = octree.getLabels(points) 38 | 39 | empty_idx, occupied_idx, unknown_idx = [], [], [] 40 | 41 | for i in range(len(labels)): 42 | if labels[i] == 0: 43 | empty_idx.append(i) 44 | elif labels[i] == 1: 45 | occupied_idx.append(i) 46 | else: 47 | unknown_idx.append(i) 48 | 49 | colors = np.full((len(points), 3), [1.0, 0.0, 0.0]) 50 | colors[empty_idx] = [0.0, 0.0, 1.0] 51 | 52 | scan.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 53 | 54 | known_idx = np.concatenate((occupied_idx, empty_idx), axis=None) 55 | pcd_known = scan.select_by_index(known_idx) 56 | 57 | pred_tree = o3d.geometry.KDTreeFlann(pcd_known) 58 | color = np.asarray(pcd_known.colors) 59 | 60 | static_idx, dynamic_idx = [], [] 61 | 62 | # Assigning labels to unknown labels 63 | for pt in unknown_idx: 64 | [_, idx, _] = pred_tree.search_knn_vector_3d(points[pt], nearest_neighbors) 65 | 66 | final_score = np.mean(color[idx, 0]) 67 | 68 | if final_score > 0.5: 69 | static_idx.append(pt) 70 | else: 71 | dynamic_idx.append(pt) 72 | 73 | static_idx = np.concatenate((occupied_idx, static_idx), axis=None) 74 | dynamic_idx = np.concatenate((empty_idx, dynamic_idx), axis=None) 75 | static_idx = static_idx.astype(np.int32) 76 | dynamic_idx = dynamic_idx.astype(np.int32) 77 | labels = np.full((len(static_idx) + len(dynamic_idx),), 9) 78 | labels[dynamic_idx] = 251 79 | 80 | # Storing labels for input scan 81 | if ground_removal: 82 | file_name = f"./labels/{seq}/{str(scan_no).zfill(6)}" 83 | else: 84 | file_name = f"./labels/{seq}/orig/{str(scan_no).zfill(6)}" 85 | labels.reshape((-1)).astype(np.int32) 86 | 
labels.tofile(file_name + '.label') 87 | 88 | 89 | def main(): 90 | # Loading Parameters 91 | with open('config.yml') as f: 92 | config = yaml.safe_load(f) 93 | 94 | # Initializing Parameters 95 | seq = config['Dataset']['sequence'] 96 | batch_size = config['Dataset']['batch_size'] 97 | pose_file = config['Dataset']['path_to_poses'] 98 | calib_file = config['Dataset']['path_to_calibration'] 99 | path_to_scans = config['Dataset']['path_to_scans'] 100 | path_to_gt = config['Dataset']['path_to_gt'] 101 | 102 | nearest_neighbors = config['Octomap']['nearest_neighbors'] 103 | resolution = config['Octomap']['resolution'] 104 | ground_removal = config['Octomap']['ground_removal'] 105 | height_filter = config['Octomap']['height_filter'] 106 | height_threshold = 0.5 107 | octree = octomap.OcTree(resolution) 108 | 109 | hm_resolution = config['Height_Map']['resolution'] 110 | fwd_range = (config['Height_Map']['backward_range'], config['Height_Map']['fwd_range']) 111 | side_range = (config['Height_Map']['right_range'], config['Height_Map']['left_range']) 112 | height_range = (config['Height_Map']['bottom'], config['Height_Map']['top']) 113 | 114 | store_pcd = config['Results']['store_pcd'] 115 | store_individual_label = config['Results']['store_individual_label'] 116 | 117 | poses = utils.load_poses(pose_file) 118 | inv_frame0 = np.linalg.inv(poses[0]) 119 | 120 | # load calibrations 121 | T_cam_velo = utils.load_calib(calib_file) 122 | T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4)) 123 | T_velo_cam = np.linalg.inv(T_cam_velo) 124 | 125 | # convert kitti poses from camera coord to LiDAR coord 126 | new_poses = [] 127 | for pose in poses: 128 | new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo)) 129 | poses = np.array(new_poses) 130 | 131 | # creating batches for the sequence 132 | batches, max_dis = get_batches(poses, batch_size) 133 | print(len(batches), "batches created") 134 | 135 | for batch in batches: 136 | octree.clear() 137 | octree = octomap.OcTree(resolution) 138 | start, end = batch[0], batch[1] 139 | pcds = {} 140 | final_map = o3d.geometry.PointCloud() 141 | start_idx, end_idx = start, end 142 | 143 | print(f"Running OctoMap from {batch[0]} till {batch[1]}") 144 | for scan_no in tqdm(range(start_idx, end_idx)): 145 | 146 | pcd = utils.convert_kitti_bin_to_pcd(f"{path_to_scans}{str(scan_no).zfill(6)}.bin") 147 | pcds[scan_no] = pcd 148 | 149 | # Applying ground segmentation on Point Cloud 150 | ground_pcd, non_ground_pcd, ground_indices, non_ground_indices = \ 151 | segment_ground_multi_res(pcd=pcd, 152 | res=hm_resolution, 153 | fwd_range=fwd_range, 154 | side_range=side_range, 155 | height_range=height_range, 156 | height_threshold=-1.1) 157 | 158 | points = np.asarray(pcd.points) 159 | ground_pcd = ground_pcd.transform(poses[scan_no]) 160 | ground_points = np.asarray(ground_pcd.points) 161 | 162 | ht_filter = np.where(points[:, 2] > height_threshold)[0] 163 | ht_points = points[ht_filter] 164 | 165 | final_map = final_map + pcd.transform(poses[scan_no]) 166 | 167 | pose = poses[scan_no] 168 | 169 | # Inserting Point Cloud into OcTree 170 | octree.insertPointCloud( 171 | pointcloud=points, 172 | origin=np.array([pose[0][3], pose[1][3], pose[2][3]], dtype=float), 173 | maxrange=80.0, 174 | ) 175 | 176 | # Setting ground points as static in the Occupancy grid 177 | if ground_removal: 178 | for pt in range(len(ground_points)): 179 | try: 180 | key = octree.coordToKey(ground_points[pt]) 181 | node = octree.search(key) 182 | node.setValue(200.0) 183 | except Exception 
as e: 184 | print(e) 185 | 186 | # Setting points above a certain height as static in the Occupancy grid 187 | if height_filter: 188 | for pt in range(len(ht_points)): 189 | try: 190 | key = octree.coordToKey(ht_points[pt]) 191 | node = octree.search(key) 192 | node.setValue(200.0) 193 | except Exception as e: 194 | print(e) 195 | 196 | octree.updateInnerOccupancy() 197 | 198 | # Extracting labels from OcTree 199 | final_points = np.asarray(final_map.points) 200 | labels = octree.getLabels(final_points) 201 | 202 | occupied_idx, empty_idx, unknown_idx = [], [], [] 203 | for i in range(len(labels)): 204 | if labels[i] == 1 or labels[i] == -1: 205 | occupied_idx.append(i) 206 | else: 207 | empty_idx.append(i) 208 | 209 | # pcd_static = final_map.select_by_index(occupied_idx) 210 | # pcd_dynamic = final_map.select_by_index(empty_idx) 211 | 212 | # color_static = np.full((len(np.asarray(pcd_static.points)), 3), [1.0, 0.0, 0.0]) 213 | # color_dynamic = np.full((len(np.asarray(pcd_dynamic.points)), 3), [0.0, 0.0, 1.0]) 214 | 215 | # pcd_static.colors = o3d.utility.Vector3dVector(np.asarray(color_static)) 216 | # pcd_dynamic.colors = o3d.utility.Vector3dVector(np.asarray(color_dynamic)) 217 | 218 | # pcd = pcd_static + pcd_dynamic 219 | # pred_tree = o3d.geometry.KDTreeFlann(pcd) 220 | # color = np.asarray(pcd.colors) 221 | 222 | # # Assigning labels to unknown labels 223 | # for pt in unknown_idx: 224 | # [_, idx, _] = pred_tree.search_knn_vector_3d(final_points[pt], nearest_neighbors) 225 | # final_score = np.mean(color[idx, 0]) 226 | 227 | # if final_score > 0.5: 228 | # occupied_idx.append(pt) 229 | # else: 230 | # empty_idx.append(pt) 231 | 232 | pcd_static = final_map.select_by_index(occupied_idx) 233 | pcd_dynamic = final_map.select_by_index(empty_idx) 234 | 235 | # Downsampling static and dynamic point cloud 236 | data = pcd_static.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 237 | [max_dis, max_dis, max_dis]) 238 | pcd_static = data[0] 239 | data = pcd_dynamic.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 240 | [max_dis, max_dis, max_dis]) 241 | 242 | pcd_dynamic = data[0] 243 | data = final_map.voxel_down_sample_and_trace(0.1, [-1 * max_dis, -1 * max_dis, -1 * max_dis], 244 | [max_dis, max_dis, max_dis]) 245 | down_idx = data[1] 246 | down_idx = down_idx[down_idx > 0] 247 | 248 | # Calculating static and dynamic indices for original map 249 | static_idx = list(set(occupied_idx) & set(down_idx)) 250 | dynamic_idx = list(set(empty_idx) & set(down_idx)) 251 | 252 | # Performing evaluation 253 | print("performing eval ...") 254 | ts, td, total_static, total_dynamic = evaluation.eval(static_idx=static_idx, 255 | dynamic_idx=dynamic_idx, 256 | start=start, 257 | end=end, 258 | poses=poses, 259 | path_to_scan=path_to_scans, 260 | path_to_gt=path_to_gt) 261 | print("TS", ts, "Total Static", total_static) 262 | print("TD", td, "Total Dynamic", total_dynamic) 263 | 264 | key = str(start) + "-" + str(end) 265 | res = {key: {"TS": ts, "TD": td, "Total Voxels": total_static, "Total Dynamic Points": total_dynamic, 266 | "Accuracy": (ts + td) / (total_dynamic + total_static + 1e-8), 267 | "Accuracy_1": (ts / (2 * total_static + 1e-8)) + (td / (2 * total_dynamic + 1e-8)), 268 | "Recall": td / (total_dynamic + 1e-8)}} 269 | 270 | # Storing results 271 | json_data = json.dumps(res) 272 | if ground_removal: 273 | f = open(f'./json/{seq}.json', 'a+') 274 | f.write(json_data + "\n") 275 | f.close() 276 | else: 277 | f = 
open(f'./json/{seq}_orig.json', 'a+') 278 | f.write(json_data + "\n") 279 | f.close() 280 | 281 | # Storing static and dynamic point cloud for individual scans 282 | if store_pcd: 283 | o3d.io.write_point_cloud(f"./pcd/{seq}/static/{int(start)}-{int(end)}.pcd", pcd_static) 284 | o3d.io.write_point_cloud(f"./pcd/{seq}/dynamic/{int(start)}-{int(end)}.pcd", pcd_dynamic) 285 | 286 | # Storing labels for individual scans 287 | if store_individual_label: 288 | print("Storing Scan Wise Labels") 289 | for scan_no in tqdm(range(start, end)): 290 | pcd = pcds[scan_no] 291 | get_scan_wise_labels(octree, pcd, scan_no, nearest_neighbors, seq, ground_removal) 292 | 293 | 294 | if __name__ == '__main__': 295 | main() 296 | -------------------------------------------------------------------------------- /range_image_map.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import open3d as o3d 4 | 5 | 6 | def point_cloud_2_birdseye(points, 7 | res=0.05, 8 | side_range=(-50., 50.), 9 | fwd_range=(-50., 50.), 10 | height_range=(-2., 1.5)): 11 | """ 12 | Creates HeightMap from the given set of points. 13 | 14 | Args: 15 | points (numpy array nx3): Points for which a range image is required to be generated 16 | res (float): Resolution to be used for generating range image. 17 | fwd_range(tuple(float, float)): x_value for which points are to be considered. 18 | side_range(tuple(float, float)): y_value for which points are to be considered. 19 | height_range(tuple(float, float)): z_value for which points are to be considered. 20 | 21 | Returns: 22 | im: HeightMap 23 | p2c: Mapping of points from HeightMap to Point Cloud. 24 | """ 25 | 26 | # Separating out x, y and z coordinates 27 | x_points = points[:, 0] 28 | y_points = points[:, 1] 29 | z_points = points[:, 2] 30 | 31 | # Applying horizontal, and vertical filters 32 | f_filt = np.logical_and((x_points > fwd_range[0]), (x_points < fwd_range[1])) 33 | s_filt = np.logical_and((y_points > -side_range[1]), (y_points < -side_range[0])) 34 | filter = np.logical_and(f_filt, s_filt) 35 | indices = np.argwhere(filter).flatten() 36 | 37 | # Extracting points following the filter above created 38 | x_points = x_points[indices] 39 | y_points = y_points[indices] 40 | z_points = z_points[indices] 41 | points = points[indices] 42 | 43 | x_img = (-y_points / res).astype(np.int32) # x axis is -y in LIDAR 44 | y_img = (-x_points / res).astype(np.int32) # y axis is -x in LIDAR 45 | 46 | x_img -= int(np.floor(side_range[0] / res)) 47 | y_img += int(np.ceil(fwd_range[1] / res)) 48 | 49 | # Clipping values according to the height range specified 50 | pixel_values = np.clip(a=z_points, 51 | a_min=height_range[0], 52 | a_max=height_range[1]) 53 | 54 | # Creating a pixel to point mapping 55 | p2c = {} 56 | for k, v in zip(points, zip(y_img, x_img)): 57 | p2c[k.tobytes()] = v 58 | 59 | # Scaling pixel values from 0-255 60 | pixel_values = (((pixel_values - height_range[0]) / 61 | float(height_range[1] - height_range[0])) * 255).astype(np.uint8) 62 | 63 | x_max = 1 + int((side_range[1] - side_range[0]) / res) 64 | y_max = 1 + int((fwd_range[1] - fwd_range[0]) / res) 65 | im = np.zeros([y_max, x_max], dtype=np.uint8) 66 | 67 | im[y_img, x_img] = pixel_values 68 | 69 | return im, p2c 70 | 71 | 72 | def segment_ground_indices_ht(pcd, 73 | res, 74 | fwd_range, 75 | side_range, 76 | height_range, 77 | height_threshold): 78 | 79 | """ 80 | Provides probable ground points calculated using certain 81 | resolution and 
after application of height filter 82 | 83 | Args: 84 | pcd (o3d.geometry.PointCloud()): Point cloud for which ground points are required 85 | res (float): Resolution to be used for generating range image. 86 | fwd_range(tuple(float, float)): x_value for which points are to be considered. 87 | side_range(tuple(float, float)): y_value for which points are to be considered. 88 | height_range(tuple(float, float)): z_value for which points are to be considered. 89 | height_threshold(float): Height threshold for correcting wrongly classified non-ground points 90 | 91 | Returns: 92 | ground_pcd: Point Cloud consisting of only ground points 93 | non_ground_pcd: Point Cloud consisting of non-ground points 94 | ground_indices: Indices of points that belong to ground from original Point Cloud 95 | non_ground_indices: Indices of points that belong to non-ground from original Point Cloud 96 | 97 | """ 98 | 99 | # Creating HeightMap from point cloud 100 | points = np.asarray(pcd.points) 101 | img, p2c = point_cloud_2_birdseye(points, res=res, side_range=side_range, fwd_range=fwd_range, 102 | height_range=height_range) 103 | img = np.array(img) 104 | 105 | # Applying Canny Edge Detector 106 | CANNY_THRESH_1 = 15 107 | CANNY_THRESH_2 = 300 108 | 109 | edges = cv2.Canny(img, CANNY_THRESH_1, CANNY_THRESH_2) 110 | edges = cv2.dilate(edges, None) 111 | 112 | colors = np.full((len(points), 3), [1.0, 0.0, 0.0]) 113 | ground_indices = [] 114 | 115 | # Calculating ground indices for input scan 116 | for i in range(len(points)): 117 | if points[i].tobytes() in p2c.keys(): 118 | y, x = p2c[points[i].tobytes()] 119 | if edges[y, x] == 0: 120 | ground_indices.append(i) 121 | 122 | colors[ground_indices] = [0.0, 0.0, 0.0] 123 | colors = np.asarray(colors) 124 | 125 | pcd.colors = o3d.utility.Vector3dVector(np.asarray(colors)) 126 | ground_indices = [] 127 | non_ground_indices = [] 128 | 129 | for i in range(len(colors)): 130 | if list(colors[i]) == [0.0, 0.0, 0.0]: 131 | ground_indices.append(i) 132 | else: 133 | non_ground_indices.append(i) 134 | 135 | ground_points = points[ground_indices] 136 | 137 | # Applying height threshold on ground points detected above 138 | ht_filter = np.where(ground_points[:, 2] > height_threshold)[0] 139 | 140 | # Reverting non-ground points back from ground points 141 | non_ground_ext = np.asarray(ground_indices)[ht_filter] 142 | non_ground_indices.extend(non_ground_ext) 143 | 144 | ground_indices = np.delete(ground_indices, ht_filter) 145 | 146 | ground_pcd = pcd.select_by_index(ground_indices) 147 | non_ground_pcd = pcd.select_by_index(ground_indices, invert=True) 148 | 149 | return ground_pcd, non_ground_pcd, ground_indices, non_ground_indices 150 | 151 | 152 | def segment_ground_multi_res(pcd, 153 | res, 154 | fwd_range, 155 | side_range, 156 | height_range, 157 | height_threshold): 158 | """ 159 | Provides final ground points calculated using certain 160 | resolution and after application of height filter 161 | 162 | Args: 163 | pcd (o3d.geometry.PointCloud()): Point cloud for which ground points are required 164 | res (List: float): List of resolutions to be used for generating range image. 165 | fwd_range(tuple(float, float)): x_value for which points are to be considered. 166 | side_range(tuple(float, float)): y_value for which points are to be considered. 167 | height_range(tuple(float, float)): z_value for which points are to be considered. 
        height_threshold (float): Height threshold for correcting wrongly classified non-ground points

    Returns:
        ground_pcd: Point Cloud consisting of only ground points
        non_ground_pcd: Point Cloud consisting of non-ground points
        ground_indices: Indices of points that belong to ground from original Point Cloud
        non_ground_indices: Indices of points that belong to non-ground from original Point Cloud
    """

    # Alternative resolution sets that were tried:
    # res = [0.01, 0.05, 0.09, 0.16, 0.25]        # 2
    # res = [0.01, 0.03, 0.05, 0.07, 0.09]        # 3 --best
    # res = [0.01, 0.05, 0.09, 0.16, 0.25, 0.38]  # 1
    # res = [0.01, 0.03, 0.05, 0.07, 0.09, 0.12]  # 4
    # res = [0.01, 0.03, 0.04]

    points = np.asarray(pcd.points)
    freq = np.full((len(points),), 0)

    # Applying HeightMap based ground segmentation for multiple resolutions
    for resolution in res:
        ground, non_ground, ground_indices, non_ground_indices = segment_ground_indices_ht(pcd,
                                                                                            resolution,
                                                                                            fwd_range,
                                                                                            side_range,
                                                                                            height_range,
                                                                                            height_threshold)
        freq[ground_indices] = freq[ground_indices] + 1

    # A point is kept as ground if it was labelled ground for at least half of the resolutions
    freq = freq / len(res)
    ground_indices = np.where(freq >= 0.5)[0]
    non_ground_indices = np.where(freq < 0.5)[0]

    ground_pcd = pcd.select_by_index(ground_indices)
    non_ground_pcd = pcd.select_by_index(ground_indices, invert=True)

    return ground_pcd, non_ground_pcd, ground_indices, non_ground_indices
--------------------------------------------------------------------------------
/time_utils.py:
--------------------------------------------------------------------------------
# @file time_utils.py
# @author Ignacio Vizzo [ivizzo@uni-bonn.de]
#
# Copyright (c) 2019 Ignacio Vizzo, all rights reserved

import time


def timeit(f):
    """Decorator that prints how long the wrapped function took, in milliseconds."""

    def wrap(*args, **kargs):
        time1 = time.time()
        ret = f(*args, **kargs)
        time2 = time.time()
        print('{:s} function took {:.3f} ms'.format(f.__name__,
                                                    (time2 - time1) * 1000.0))

        return ret

    return wrap
--------------------------------------------------------------------------------
/utility/compile_res.py:
--------------------------------------------------------------------------------
import json
import numpy as np
import matplotlib.pyplot as plt

# Sequences for which the results are to be compiled
sequences = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

for i in sequences:

    seq = []

    with open(f"../json/{i}.json", "r") as f:
        for line in f:
            seq.append(json.loads(line))

    # Calculating total correctly classified static points
    TS = [val[str(list(val.keys())[0])]["TS"] for val in seq]

    # Calculating total static points present in ground truth
    total_static = [val[str(list(val.keys())[0])]["Total Voxels"] for val in seq]

    # Calculating total correctly classified dynamic points
    TD = [val[str(list(val.keys())[0])]["TD"] for val in seq]

    # Calculating total dynamic points present in ground truth
    total_dynamic = [val[str(list(val.keys())[0])]["Total Dynamic Points"] for val in seq]

    # Aggregating the stats
    TS = np.sum(TS)
    total_static = np.sum(total_static)

    TD = np.sum(TD)
    total_dynamic = np.sum(total_dynamic)

    # Calculating accuracy and dynamic recall
    accuracy_our = ((TS/total_static) + (TD/(total_dynamic + 1e-8)))/2
    recall_our = TD/(total_dynamic + 1e-8)
    # Printing results
    print(i)
    print("accuracy ", accuracy_our)
    print("recall", recall_our)
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
from time_utils import timeit


def load_label(label_path):
    label = np.fromfile(label_path, dtype=np.int32)
    label = label.reshape((-1))

    return label


def convert_kitti_bin_to_pcd(binFilePath):
    bin_pcd = np.fromfile(binFilePath, dtype=np.float32)
    # print(type(bin_pcd))
    points = bin_pcd.reshape((-1, 4))[:, 0:3]
    o3d_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
    return o3d_pcd


""" Using depth as a filter to label points as static or dynamic
    Currently using 40m as a threshold
"""

def depth_filtering(gt_pcd, pred_label, thresh=40):
    gt_points = np.asarray(gt_pcd.points)

    for i in range(len(gt_points)):
        dist = np.sqrt(gt_points[i][0]**2 + gt_points[i][1]**2 + gt_points[i][2]**2)
        if dist > thresh:
            pred_label[i] = 9

    return pred_label


""" Using height as a filter to label points as static or dynamic
    Currently using 0.3m as a threshold
"""

def height_filtering(gt_pcd, pred_label, thresh=0.3):
    gt_points = np.asarray(gt_pcd.points)

    pred_label[gt_points[:, 2] > thresh] = 9

    return pred_label


# Function to calculate the TP / FP / TN / FN counts for each scan
def get_indices(gt_label, pred_label):
    tp_idx, tn_idx, fp_idx, fn_idx = [], [], [], []

    for i in range(len(pred_label)):
        if pred_label[i] == gt_label[i] == 9:
            tp_idx.append(i)
        elif pred_label[i] == gt_label[i] == 251:
            tn_idx.append(i)
        elif pred_label[i] == 9 and gt_label[i] == 251:
            fp_idx.append(i)
        elif pred_label[i] == 251 and gt_label[i] == 9:
            fn_idx.append(i)

    return len(tp_idx), len(fp_idx), len(tn_idx), len(fn_idx)


def create_range_image(points, labels, type_, seq, proj_fov_up=3.0, proj_fov_down=-25.0, proj_W=1024, proj_H=64):

    # projected range image - [H,W] range (-1 is no data)
    proj_range = np.full((proj_H, proj_W), -1,
                         dtype=np.float32)

    # unprojected range (list of depths for each point)
    unproj_range = np.zeros((0, 1), dtype=np.float32)

    # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data)
    proj_xyz = np.full((proj_H, proj_W, 3), -1,
                       dtype=np.float32)

    # projected remission - [H,W] intensity (-1 is no data)
    proj_remission = np.full((proj_H, proj_W), -1,
                             dtype=np.float32)

    proj_labels = np.full((proj_H, proj_W, 3), -1,
                          dtype=np.float32)

    # projected index (for each pixel, what I am in the pointcloud)
    # [H,W] index (-1 is no data)
    proj_idx = np.full((proj_H, proj_W), -1,
                       dtype=np.int32)

    # for each point, where it is in the range image
    proj_x = np.zeros((0, 1), dtype=np.float32)  # [m, 1]: x
    proj_y = np.zeros((0, 1), dtype=np.float32)  # [m, 1]: y

    # mask containing for each pixel, if it contains a point or not
    proj_mask = np.zeros((proj_H, proj_W),
                         dtype=np.int32)

    fov_up = proj_fov_up / 180.0 * np.pi  # field of view up in rad
    fov_down = proj_fov_down / 180.0 * np.pi  # field of view down in rad
    fov = abs(fov_down) + abs(fov_up)  # get field of view total in rad

    # get depth of all points
    depth = np.linalg.norm(points, 2, axis=1)

    # get scan components
    scan_x = points[:, 0]
    scan_y = points[:, 1]
    scan_z = points[:, 2]

    # get angles of all points
    yaw = -np.arctan2(scan_y, scan_x)
    pitch = np.arcsin(scan_z / depth)

    # get projections in image coords
    proj_x = 0.5 * (yaw / np.pi + 1.0)  # in [0.0, 1.0]
    proj_y = 1.0 - (pitch + abs(fov_down)) / fov  # in [0.0, 1.0]

    # scale to image size using angular resolution
    proj_x *= proj_W  # in [0.0, W]
    proj_y *= proj_H  # in [0.0, H]

    # round and clamp for use as index
    proj_x = np.floor(proj_x)
    proj_x = np.minimum(proj_W - 1, proj_x)
    proj_x = np.maximum(0, proj_x).astype(np.int32)  # in [0, W-1]

    proj_y = np.floor(proj_y)
    proj_y = np.minimum(proj_H - 1, proj_y)
    proj_y = np.maximum(0, proj_y).astype(np.int32)  # in [0, H-1]

    # copy of depth in original order
    unproj_range = np.copy(depth)

    # order in decreasing depth
    indices = np.arange(depth.shape[0])
    order = np.argsort(depth)[::-1]
    depth = depth[order]
    indices = indices[order]
    points = points[order]
    proj_y = proj_y[order]
    proj_x = proj_x[order]
    labels = labels[order]

    # mapping from point (as bytes) to its pixel in the range image
    p2c = {}
    for k, v in zip(points, zip(proj_y, proj_x)):
        p2c[k.tobytes()] = v

    # mapping from pixel to its label
    label_mapping = {}
    for k, v in zip(zip(proj_y, proj_x), labels):
        label_mapping[k] = v

    int_label = []
    for i in range(len(labels)):
        int_label.append([int(l) for l in labels[i]])

    proj_int_labels = np.full((proj_H, proj_W, 3), -1,
                              dtype=np.uint8)

    # assign to images
    proj_range[proj_y, proj_x] = depth
    proj_xyz[proj_y, proj_x] = points
    proj_idx[proj_y, proj_x] = indices
    proj_labels[proj_y, proj_x] = labels
    proj_int_labels[proj_y, proj_x] = int_label
    proj_mask = (proj_idx > 0).astype(np.float32)

    return proj_xyz, proj_labels, p2c, label_mapping


def load_calib(calib_path):
    T_cam_velo = []
    try:
        with open(calib_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if 'Tr:' in line:
                    line = line.replace('Tr:', '')
                    T_cam_velo = np.fromstring(line, dtype=float, sep=' ')
                    T_cam_velo = T_cam_velo.reshape(3, 4)
                    T_cam_velo = np.vstack((T_cam_velo, [0, 0, 0, 1]))

    except FileNotFoundError:
        print('Calibrations are not available.')

    return np.array(T_cam_velo)


def load_poses(pose_path):

    # Read and parse the poses
    poses = []
    try:
        if '.txt' in pose_path:
            with open(pose_path, 'r') as f:
                lines = f.readlines()
                for line in lines:
                    T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
                    T_w_cam0 = T_w_cam0.reshape(3, 4)
                    T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
                    poses.append(T_w_cam0)
        else:
            poses = np.load(pose_path)['arr_0']
    except FileNotFoundError:
        print('Ground truth poses are not available.')

    return np.array(poses)


def custom_draw_geometry_with_key_callback(pcd):

    def change_background_to_black(vis):
        opt = vis.get_render_option()
        opt.background_color = np.asarray([0, 0, 0])
        return False

    def load_render_option(vis):
        vis.get_render_option().load_from_json(
            "../../TestData/renderoption.json")
        return False

    def capture_depth(vis):
        depth = vis.capture_depth_float_buffer()
        plt.imshow(np.asarray(depth))
        plt.show()
        return False

    def capture_image(vis):
        image = vis.capture_screen_float_buffer()
        plt.imshow(np.asarray(image))
        plt.show()
        return False

    key_to_callback = {}
    key_to_callback[ord("K")] = change_background_to_black
    key_to_callback[ord("R")] = load_render_option
    key_to_callback[ord(",")] = capture_depth
    key_to_callback[ord(".")] = capture_image
    o3d.visualization.draw_geometries_with_key_callbacks([pcd], key_to_callback)
--------------------------------------------------------------------------------
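--------------------------------------------------------------------------------
Usage note (illustrative, not part of the repository): a minimal sketch of how the
ground-segmentation utilities above might be combined on a single SemanticKITTI-style
scan. The scan path, the resolution list, and the 0.3 m height threshold are
assumptions made for this example; the resolution set mirrors the one marked
"--best" among the commented-out alternatives in segment_ground_multi_res.

# example_ground_segmentation.py -- hypothetical helper, not shipped with the repo
import open3d as o3d

from utils import convert_kitti_bin_to_pcd
from range_image_map import segment_ground_multi_res

# Hypothetical path to a KITTI .bin scan; adjust to your dataset layout
scan_path = "./sequences/00/velodyne/000000.bin"
pcd = convert_kitti_bin_to_pcd(scan_path)

# Multi-resolution HeightMap ground segmentation with an assumed 0.3 m height threshold
ground, non_ground, ground_idx, non_ground_idx = segment_ground_multi_res(
    pcd,
    res=[0.01, 0.03, 0.05, 0.07, 0.09],
    fwd_range=(-50., 50.),
    side_range=(-50., 50.),
    height_range=(-2., 1.5),
    height_threshold=0.3,
)

# Visualize the estimated ground points
o3d.visualization.draw_geometries([ground])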