├── media
│   ├── treescope.gif
│   └── treescope.png
├── diameter_estimation
│   ├── yaml
│   │   ├── VAT-0723-predictions.yaml
│   │   ├── VAT-0723-DBCRE.yaml
│   │   └── VAT-0723-SLOAM.yaml
│   ├── evaluate_dbh_rmse.py
│   └── process_tree_node.py
├── semantic_segmentation
│   ├── h5-to-labels.py
│   ├── labels-to-h5.py
│   ├── evaluate_iou.py
│   └── full_data_preprocessor.py
└── README.md

/media/treescope.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KumarRobotics/treescope/HEAD/media/treescope.gif
--------------------------------------------------------------------------------
/media/treescope.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KumarRobotics/treescope/HEAD/media/treescope.png
--------------------------------------------------------------------------------
/diameter_estimation/yaml/VAT-0723-predictions.yaml:
--------------------------------------------------------------------------------
VAT-0723M-01:
  tree11:
  tree12:
  tree13:
  tree15:
  tree24:
  tree31:
  tree32:
  tree38:
  tree40:
  tree50:
  tree58:
VAT-0723M-02:
  tree7:
  tree28:
  tree29:
  tree30:
  tree34:
  tree38:
  tree39:
VAT-0723M-03:
  tree2:
  tree4:
  tree7:
  tree8:
  tree10:
  tree11:
  tree13:
  tree14:
  tree22:
  tree29:
  tree31:
  tree35:
  tree41:
  tree50:
  tree52:
VAT-0723M-04:
  tree3:
  tree5:
  tree9:
  tree11:
  tree47:
  tree48:
  tree60:
VAT-0723M-05:
  tree3:
  tree4:
  tree5:
  tree8:
  tree10:
  tree12:
  tree13:
  tree18:
  tree23:
  tree34:
  tree38:
  tree39:
  tree40:
  tree41:
  tree42:
  tree43:
  tree57:
  tree96:
  tree97:
  tree103:
  tree108:
  tree116:
  tree118:
  tree122:
  tree123:
  tree127:
VAT-0723M-06:
  tree0:
  tree1:
  tree2:
  tree5:
  tree6:
  tree14:
  tree18:
  tree19:
  tree22:
  tree24:
  tree25:
--------------------------------------------------------------------------------
/diameter_estimation/yaml/VAT-0723-DBCRE.yaml:
--------------------------------------------------------------------------------
VAT-0723M-01:
  tree11: 0.315
  tree12: 0.349
  tree13: 0.364
  tree15: 0.301
  tree24: 0.243
  tree31: 0.201
  tree32: 0.305
  tree38: 0.286
  tree40: 0.269
  tree50: 0.239
  tree58: 0.274
VAT-0723M-02:
  tree7: 0.406
  tree28: 0.302
  tree29: 0.307
  tree30: 0.386
  tree34: 0.351
  tree38: 0.293
  tree39: 0.319
VAT-0723M-03:
  tree2: 0.336
  tree4: 0.276
  tree7: 0.233
  tree8: 0.345
  tree10: 0.33
  tree11: 0.225
  tree13: 0.346
  tree14: 0.298
  tree22: 0.366
  tree29: 0.265
  tree31: 0.299
  tree35: 0.246
  tree41: 0.194
  tree50: 0.342
  tree52: 0.226
VAT-0723M-04:
  tree3: 0.207
  tree5: 0.218
  tree9: 0.249
  tree11: 0.336
  tree47: 0.278
  tree48: 0.258
  tree60: 0.419
VAT-0723M-05:
  tree3: 0.243
  tree4: 0.285
  tree5: 0.378
  tree8: 0.330
  tree10: 0.363
  tree12: 0.311
  tree13: 0.301
  tree18: 0.450
  tree23: 0.236
  tree34: 0.290
  tree38: 0.386
  tree39: 0.252
  tree40: 0.327
  tree41: 0.360
  tree42: 0.407
  tree43: 0.378
  tree57: 0.303
  tree96: 0.288
  tree97: 0.314
  tree103: 0.400
  tree108: 0.341
  tree116: 0.321
  tree118: 0.294
  tree122: 0.309
  tree123: 0.350
  tree127: 0.323
VAT-0723M-06:
  tree0: 0.252
  tree1: 0.245
  tree2: 0.27
  tree5: 0.274
  tree6: 0.333
  tree14: 0.318
  tree18: 0.238
  tree19: 0.316
  tree22: 0.208
  tree24: 0.235
  tree25: 0.255
--------------------------------------------------------------------------------
/diameter_estimation/yaml/VAT-0723-SLOAM.yaml:
--------------------------------------------------------------------------------
VAT-0723M-01:
  tree11: 0.385
  tree12: 0.402
  tree13: 0.399
  tree15: 0.385
  tree24: 0.332
  tree31: 0.353
  tree32: 0.482
  tree38: 0.367
  tree40: 0.331
  tree50: 0.388
  tree58: 0.412
VAT-0723M-02:
  tree7: 0.426
  tree28: 0.388
  tree29: 0.402
  tree30: 0.448
  tree34: 0.382
  tree38: 0.393
  tree39: 0.401
VAT-0723M-03:
  tree2: 0.398
  tree4: 0.342
  tree7: 0.285
  tree8: 0.387
  tree10: 0.402
  tree11: 0.353
  tree13: 0.396
  tree14: 0.401
  tree22: 0.406
  tree29: 0.385
  tree31: 0.344
  tree35: 0.302
  tree41: 0.318
  tree50: 0.375
  tree52: 0.322
VAT-0723M-04:
  tree3: 0.302
  tree5: 0.298
  tree9: 0.276
  tree11: 0.402
  tree47: 0.299
  tree48: 0.288
  tree60: 0.402
VAT-0723M-05:
  tree3: 0.0011
  tree4: 0.0016
  tree5: 0.0025
  tree8: 0.0012
  tree10: 0.0012
  tree12: 0.0016
  tree13: 0.0026
  tree18: 0.0084
  tree23: 0.0008
  tree34: 0.0018
  tree38: 0.0051
  tree39: 0.0094
  tree40: 0.0026
  tree41: 0.0016
  tree42: 0.0024
  tree43: 0.0037
  tree57: 0.0045
  tree96: 0.0064
  tree97: 0.0016
  tree103: 0.0084
  tree108: 0.0028
  tree116: 0.0030
  tree118: 0.0038
  tree122: 0.0033
  tree123: 0.0061
  tree127: 0.0036
VAT-0723M-06:
  tree0: 0.356
  tree1: 0.329
  tree2: 0.374
  tree5: 0.299
  tree6: 0.357
  tree14: 0.368
  tree18: 0.351
  tree19: 0.364
  tree22: 0.275
  tree24: 0.293
  tree25: 0.312
--------------------------------------------------------------------------------
/diameter_estimation/evaluate_dbh_rmse.py:
--------------------------------------------------------------------------------
import sys
import json
import yaml
import numpy as np
import csv

if len(sys.argv) < 3:
    print("Usage: python3 evaluate_dbh_rmse.py <dataset>-gt.json <predictions>.yaml [output.csv]")
    sys.exit(1)

dataset_json_path = sys.argv[1]
predictions_yaml_path = sys.argv[2]
dataset = dataset_json_path.replace("-gt.json", "")

if len(sys.argv) > 3:
    csv_file = sys.argv[3]
else:
    csv_file = "results_" + dataset + ".csv"

with open(dataset_json_path, 'r') as json_file:
    json_data = json.load(json_file)

with open(predictions_yaml_path, 'r') as yaml_file:
    yaml_data = yaml.safe_load(yaml_file)

tree_ids = []
dbh_gt = []
dbh_est = []

for key in yaml_data.keys():
    for tree in yaml_data[key]:
        tree_key = key + "_" + tree
        if tree_key in json_data:
            dbh_gt.append(json_data[tree_key]['DBH'])
            dbh_est.append(yaml_data[key][tree])
            tree_ids.append(tree_key)

dbh_gt = np.array(dbh_gt)
dbh_est = np.array(dbh_est)
tree_ids = np.array(tree_ids)

diff = dbh_gt - dbh_est
mae = np.mean(np.abs(diff))
rmse = np.sqrt(np.mean(diff**2))
mean_gt = np.mean(dbh_gt)
mean_est = np.mean(dbh_est)

print(f"Dataset {dataset}, Mean DBH GT {mean_gt:.4f}, Mean DBH Estimate {mean_est:.4f}, Mean Abs Err {mae:.4f}, RMSE {rmse:.4f}")

# Save tree_ids, dbh_gt, dbh_est, mae, rmse
data = []
data.append(["Tree ID", "DBH GT", "DBH Est", "Error"])

combined_data = np.column_stack((tree_ids, dbh_gt, dbh_est, np.round(diff, 4))).tolist()
data.extend(combined_data)

# Write the data to the CSV file
with open(csv_file, mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerows(data)
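
# Example (added sketch, not part of the original script): the ground truth
# loaded above is keyed "<sequence>_<tree>" with a "DBH" field per entry, and
# the "-gt.json" suffix is required so the script can derive the dataset name.
# The values below are illustrative, not real measurements.
def write_example_ground_truth(path="VAT-0723-example-gt.json"):
    """Write a minimal, hypothetical ground-truth file for this script."""
    example = {
        "VAT-0723M-01_tree11": {"DBH": 0.330},  # illustrative value (meters)
        "VAT-0723M-01_tree12": {"DBH": 0.351},
    }
    with open(path, "w") as f:
        json.dump(example, f, indent=2)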
--------------------------------------------------------------------------------
/semantic_segmentation/h5-to-labels.py:
--------------------------------------------------------------------------------
import numpy as np
import open3d as o3d
import h5py
import os
import argparse

# Parse command line arguments
parser = argparse.ArgumentParser(description="Unpack HDF5 files into NPY and PCD files.")
parser.add_argument('--file', type=str, required=True, help='Path to the input HDF5 file.')
parser.add_argument('--output', type=str, required=True, help='Directory to save the output files.')
parser.add_argument('--visualize', action='store_true', help='Visualize the resulting point clouds.')
args = parser.parse_args()

def unpack_hdf5(hdf5_path, output_path, visualize=False):
    with h5py.File(hdf5_path, 'r') as f:
        for time_str in f.keys():
            group = f[time_str]
            tree_pts = group['tree_trunks_' + time_str][:]
            ground_pts = group['ground_' + time_str][:]

            # labels: trees as 8, ground as 1
            labels = np.zeros((tree_pts.shape[0] + ground_pts.shape[0],), dtype=int)
            labels[:tree_pts.shape[0]] = 8  # Label for trees
            labels[tree_pts.shape[0]:] = 1  # Label for ground

            # Save labels as .npy
            npy_path = os.path.join(output_path, 'converted_labels', 'label_' + time_str + '.npy')
            os.makedirs(os.path.dirname(npy_path), exist_ok=True)
            np.save(npy_path, labels)

            # Merge tree and ground points for the point cloud
            all_pts = np.vstack((tree_pts, ground_pts))
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(all_pts)

            # Save point cloud as .pcd
            pcd_path = os.path.join(output_path, 'converted_scans', 'point_cloud_' + time_str + '.pcd')
            os.makedirs(os.path.dirname(pcd_path), exist_ok=True)
            o3d.io.write_point_cloud(pcd_path, pcd)

            # Optionally visualize the point clouds
            if visualize:
                o3d.visualization.draw_geometries([pcd])

unpack_hdf5(args.file, args.output, args.visualize)
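
# Example (added sketch): the HDF5 layout consumed above is one group per
# timestamp, each holding "tree_trunks_<t>" and "ground_<t>" arrays of
# shape (N, 3). Handy for inspecting a file before unpacking it.
def print_h5_layout(hdf5_path):
    """List each timestamp group and the shapes of its two point arrays."""
    with h5py.File(hdf5_path, 'r') as f:
        for t in f.keys():
            print(t, f[t]['tree_trunks_' + t].shape, f[t]['ground_' + t].shape)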
--------------------------------------------------------------------------------
/semantic_segmentation/labels-to-h5.py:
--------------------------------------------------------------------------------
import numpy as np
import open3d as o3d
import os
import argparse
import h5py

# Usage: python labels-to-h5.py -D <data-dir>

# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data", "-D", required=True, help="The main dataset directory where the labeled data is stored, with the final '/' included")
parser.add_argument('--visualize', action='store_true', help='Visualize the resulting point cloud')
args = parser.parse_args()

dataset = args.data
visualize = args.visualize

# Create the semantic_labels directory if it doesn't exist
dataset = dataset.rstrip('/') + '/'
h5_name = dataset.rstrip('/').split('/')[-1]
semantic_dir = dataset + "semantic_labels/"
if not os.path.exists(semantic_dir):
    os.makedirs(semantic_dir)

# Open (or create) the HDF5 file in the dataset folder in append mode
hdf5_output_path = os.path.join(dataset, f"{h5_name}.h5")
f = h5py.File(hdf5_output_path, 'a')

# Print all groups and datasets within the HDF5 file
def print_hdf5_objects(name):
    print(name)

f.visit(print_hdf5_objects)

# Get all the time strings from the directory
time_strings = [fname.split('.tiff')[0] for fname in os.listdir(dataset + 'scans/') if fname.endswith('.tiff')]

for time_str in time_strings:
    labels = np.load(dataset + 'converted_labels/label_' + time_str + '.npy')
    pcd = o3d.io.read_point_cloud(dataset + 'converted_scans/point_cloud_' + time_str + '.pcd')

    # Create a boolean mask for tree and ground points
    tree_mask = (labels == 8)
    ground_mask = (labels == 1)
    tree_pcd = pcd.select_by_index(np.where(tree_mask)[0])
    ground_pcd = pcd.select_by_index(np.where(ground_mask)[0])
    if visualize:
        o3d.visualization.draw_geometries([tree_pcd])
        o3d.visualization.draw_geometries([ground_pcd])

    # Place the datasets in a per-timestamp group
    tree_pts = np.asarray(tree_pcd.points)
    ground_pts = np.asarray(ground_pcd.points)
    time_group = f.create_group(time_str)
    time_group.create_dataset('tree_trunks_' + time_str, data=tree_pts)
    time_group.create_dataset('ground_' + time_str, data=ground_pts)

# Close the HDF5 file so all datasets are flushed to disk
f.close()
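
# Example (added sketch): check that one converted frame is self-consistent --
# exactly one label per point -- before it is packed into the HDF5 file above.
# The timestamp argument is a placeholder for any entry of time_strings.
def check_frame_alignment(time_str):
    """Assert label/point counts match for one timestamp and print class counts."""
    labels = np.load(dataset + 'converted_labels/label_' + time_str + '.npy')
    pcd = o3d.io.read_point_cloud(dataset + 'converted_scans/point_cloud_' + time_str + '.pcd')
    assert labels.shape[0] == len(pcd.points), "labels and points are misaligned"
    print("trees:", int((labels == 8).sum()), "ground:", int((labels == 1).sum()))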
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 🌲 TreeScope: An Agricultural Robotics Dataset for LiDAR-Based Mapping of Trees in Forests and Orchards

## Overview

[![Watch the video](media/treescope.gif)](https://www.youtube.com/watch?v=750oL-VsSIM)

**TreeScope** is a robotics dataset for precision agriculture and forestry that addresses the counting and mapping of trees in forests and orchards. **TreeScope** provides LiDAR data from agricultural environments collected with robotic platforms, such as UAVs and mobile platforms carried by vehicles and human operators. We provide ground-truth data for semantic segmentation and diameter estimation, with over 1,800 manually annotated semantic labels for tree stems and field-measured tree diameters. We share benchmark scripts for these tasks that researchers may use to evaluate the accuracy of their algorithms.

**TreeScope** processed data, raw data, and code are available to [download](https://test.treescope.org).
For more information about our dataset, please visit [https://treescope.org](https://treescope.org/) or watch our [video](https://youtu.be/GgV1PmLEFeI).

For detailed instructions on how to use this repository, please refer to this [step-by-step tutorial](https://docs.google.com/document/d/1j11YdxhWRJNfgbbc-5mdX1gpp7nh9M_Iu40lXpW7L3g/edit?usp=sharing).

## Converting Labels

For converting H5 labels into 2D range images:
```
python3 semantic_segmentation/h5-to-labels.py --file <input.h5> --output <output_dir>
```

For converting labeled 2D range images into H5 labels:
```
python3 semantic_segmentation/full_data_preprocessor.py -D <data_dir>/
python3 semantic_segmentation/labels-to-h5.py -D <data_dir>/
```

## Diameter Estimation Benchmarks

For calculating the root-mean-square error of diameter estimation results against ground truth:
```
python3 diameter_estimation/evaluate_dbh_rmse.py <dataset>-gt.json <predictions>.yaml [output.csv]
```

## Semantic Segmentation Benchmarks

For calculating the IoU of an inference point cloud (projected to a 2D range image) against ground truth:
```
python3 semantic_segmentation/evaluate_iou.py stacked_image.png <dataset>.yaml
python3 semantic_segmentation/evaluate_iou.py ground_truth.png pred.png <dataset>.yaml
```

## Citation

You can access the paper from [arXiv](https://arxiv.org/abs/2310.02162). To cite our work, please use:
```
@misc{cheng2023treescope,
      title={TreeScope: An Agricultural Robotics Dataset for LiDAR-Based Mapping of Trees in Forests and Orchards},
      author={Derek Cheng and Fernando Cladera Ojeda and Ankit Prabhu and Xu Liu and Alan Zhu and Patrick Corey Green and Reza Ehsani and Pratik Chaudhari and Vijay Kumar},
      year={2023},
      eprint={2310.02162},
      archivePrefix={arXiv},
      primaryClass={cs.RO}
}
```

## Contributions or Questions?

Please [file an issue](https://github.com/KumarRobotics/treescope/issues) if you have any questions.
Do not hesitate to [send your pull request](https://github.com/KumarRobotics/treescope/pulls).
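
## Benchmark YAML Format

The `<dataset>.yaml` file passed to `evaluate_iou.py` is read for two top-level keys: `labels` (class id to class name) and `color_map` (class id to the color used in the label images). The two-key structure comes from the script itself; the ids and colors below are only an illustrative sketch, not the shipped configuration:
```
labels:
  0: unlabelled
  1: road
  8: tree_trunk
color_map:
  0: [0, 0, 0]
  1: [128, 64, 128]
  8: [140, 80, 60]
```
For each class, the script builds one binary mask per color and scores IoU = intersection / union; for example, two masks that overlap on one of the three pixels in their union score an IoU of 1/3.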
--------------------------------------------------------------------------------
/semantic_segmentation/evaluate_iou.py:
--------------------------------------------------------------------------------
"""
python evaluate_iou.py ground_truth.png prediction.png wharton-forest.yaml
unlabelled IoU: 0.9333916582292169
road IoU: 0.8720882078890154
tree_trunk IoU: 0.7105739475681673
Mean IoU: 0.8386846045621331

python evaluate_iou.py stacked_image.png wharton-forest.yaml
unlabelled IoU: 0.9333916582292169
road IoU: 0.8720882078890154
tree_trunk IoU: 0.7105739475681673
Mean IoU: 0.8386846045621331
"""


import cv2
import numpy as np
import yaml
import argparse

def create_mask_for_color(image, color):
    """Create a binary mask where the given color is white and all other colors are black."""
    color_mask = np.all(image == color, axis=-1)
    return color_mask.astype(np.uint8)

def calculate_iou(ground_truth_mask, prediction_mask):
    """Calculate the Intersection over Union (IoU) for a single class."""
    intersection = np.logical_and(ground_truth_mask, prediction_mask)
    union = np.logical_or(ground_truth_mask, prediction_mask)
    if np.sum(union) == 0:
        return float('nan')
    else:
        return np.sum(intersection) / np.sum(union)

def read_yaml(file_path):
    """Read a YAML file and return the contents."""
    with open(file_path, 'r') as stream:
        data_loaded = yaml.safe_load(stream)
    return data_loaded

def load_images(range_image=None, predicted_image=None, labeled_image=None):
    gt_image, pred_image = None, None

    # If a single stacked range image is provided, split it into its three segments
    if range_image is not None:
        full_image = cv2.imread(range_image)
        if full_image is None:
            raise ValueError("Range image not found or the path is incorrect")

        image_height = full_image.shape[0]
        segment_height = image_height // 3
        range_img = full_image[:segment_height, :]
        gt_image = full_image[segment_height:2*segment_height, :]
        pred_image = full_image[2*segment_height:3*segment_height, :]

        # Save the segments to disk
        base_name = range_image.rsplit('.', 1)[0]
        cv2.imwrite(f'{base_name}-range_image.png', range_img)
        cv2.imwrite(f'{base_name}-ground_truth.png', gt_image)
        cv2.imwrite(f'{base_name}-pred.png', pred_image)

    # If separate images for prediction and labeled ground truth are provided, load them
    elif predicted_image is not None and labeled_image is not None:
        gt_image = cv2.imread(labeled_image)
        pred_image = cv2.imread(predicted_image)

        if gt_image is None or pred_image is None:
            raise ValueError("One of the images is not found or the paths are incorrect")

    else:
        raise ValueError("Incorrect image arguments provided")

    return gt_image, pred_image


def main(args):
    if len(args.images) == 2:
        # If there are only two arguments, the first is the stacked image, and the second is the YAML file.
        range_image = args.images[0]
        yaml_path = args.images[1]
        ground_truth_image, predicted_image = load_images(range_image=range_image)
    elif len(args.images) == 3:
        # If there are three arguments, the first two are separate images, and the third is the YAML file.
        ground_truth_image_path, prediction_image_path, yaml_path = args.images
        ground_truth_image, predicted_image = load_images(predicted_image=prediction_image_path, labeled_image=ground_truth_image_path)
    else:
        raise ValueError("Invalid number of arguments provided.")

    # Read the YAML file for color codes
    yaml_data = read_yaml(yaml_path)
    color_codes = {int(k): tuple(v) for k, v in yaml_data['color_map'].items()}
    classes = yaml_data['labels']
    ious = []
    # Calculate IoU for each class
    for class_id, color in color_codes.items():
        ground_truth_mask = create_mask_for_color(ground_truth_image, color)
        prediction_mask = create_mask_for_color(predicted_image, color)
        iou = calculate_iou(ground_truth_mask, prediction_mask)

        if not np.isnan(iou):
            ious.append(iou)
            class_name = classes[class_id]
            print(f"{class_name} IoU: {iou}")

    # Calculate the mean IoU, ignoring NaN values
    mean_iou = np.nanmean(ious)
    print(f"Mean IoU: {mean_iou}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Calculate IoU for segmentation maps.')
    parser.add_argument('images', type=str, nargs='+', help='Path to the image file(s) followed by the YAML file.')
    args = parser.parse_args()

    main(args)
--------------------------------------------------------------------------------
/semantic_segmentation/full_data_preprocessor.py:
--------------------------------------------------------------------------------
import time
import argparse
import shutil
import os
import glob
import numpy as np
from sklearn.model_selection import train_test_split
import re
import cv2
from pypcd import pypcd
from termcolor import colored
import traceback

def train_test_split_save(for_jackle, for_indoor, args):
    if for_jackle:
        # Maybe dataset_name is redundant and can be removed
        dataset_name = args["data_dir"].split("/")[-2]
        label_prefix = "label_pano"  # "label_sweep"
        print("Confirm that this is correct: prefix for the LIDAR data is:", label_prefix)
        time.sleep(2)
    else:
        if for_indoor:
            dataset_name = args["data_dir"].split("/")[-2]
            label_prefix = "label"
        else:
            dataset_name = args["data_dir"].split("/")[-2]
            label_prefix = "label"
    cloud_dir = args["data_dir"] + "converted_scans/"
    label_dir = args["data_dir"] + "converted_labels/"
    save_dir = args["data_dir"]
    clouds = glob.glob(cloud_dir + 'point_cloud_*.pcd')
    test_portion = 0.1
    print("percentage of data split off as the test set:", test_portion)
    train, test = train_test_split(clouds, test_size=test_portion, random_state=42)

    output_sequence_dir = save_dir + "sequences/"
    if os.path.exists(output_sequence_dir):
        print("output_sequence_dir already exists, we are going to remove it, which is: \n" + output_sequence_dir)
        input("Press Enter to continue...")
        shutil.rmtree(output_sequence_dir)
    os.makedirs(output_sequence_dir + "00/labels/")
    os.makedirs(output_sequence_dir + "01/labels/")
    os.makedirs(output_sequence_dir + "00/point_clouds/")
    os.makedirs(output_sequence_dir + "01/point_clouds/")
    os.makedirs(output_sequence_dir + "02/point_clouds/")


    for file in train:
        print("loading labels for file:", file)
        number = re.findall(r'[0-9]+', file)[0]
        shutil.copy(file, save_dir + "sequences/00/point_clouds/point_cloud_" + number + ".pcd")
        label = label_dir + label_prefix + "_" + number + ".npy"
        shutil.copy(label, save_dir + "sequences/00/labels/label_" + number + ".npy")
        print("successfully loaded labels for training file:", file)
    for file in test:
        print("loading labels for file:", file)
        number = re.findall(r'[0-9]+', file)[0]
        shutil.copy(file, save_dir + "sequences/01/point_clouds/point_cloud_" + number + ".pcd")
        label = label_dir + label_prefix + "_" + number + ".npy"
        shutil.copy(label, save_dir + "sequences/01/labels/label_" + number + ".npy")
        print("successfully loaded labels for validation file:", file)
    for file in test:
        # TODO: test data should probably be created separately from the validation set
        print("loading labels for file:", file)
        number = re.findall(r'[0-9]+', file)[0]
        shutil.copy(file, save_dir + "sequences/02/point_clouds/point_cloud_" + number + ".pcd")
        print("successfully loaded labels for test file:", file)

def convert_images_to_labels_pcds(for_jackle, for_indoor, args):
    if for_jackle:
        data_dir = args["data_dir"]
    else:
        if for_indoor:
            data_dir = args["data_dir"]
        else:
            data_dir = args["data_dir"]

    if for_jackle:
        prefix = "pano"  # "sweep"
        fnames = glob.glob(data_dir + "labels/" + prefix + "*.png")
        print("Confirm that this is correct: prefix for the LIDAR data is:", prefix)
        time.sleep(2)
    else:
        fnames = glob.glob(data_dir + "labels/1*.png")  # start with 1 to avoid including the viz_ stuff

    save_dir_point_cloud = data_dir + "converted_scans/"
    save_dir_label = data_dir + "converted_labels/"

    if os.path.exists(save_dir_point_cloud) or os.path.exists(save_dir_label):
        print("save_dir_point_cloud or save_dir_label already exists, we are going to remove them, which are: \n" + save_dir_point_cloud + " \n and: \n" + save_dir_label)
        input("Press Enter to continue...")

    if os.path.exists(save_dir_point_cloud):
        shutil.rmtree(save_dir_point_cloud)
    os.mkdir(save_dir_point_cloud)

    if os.path.exists(save_dir_label):
        shutil.rmtree(save_dir_label)
    os.mkdir(save_dir_label)


    num_classes = 10
    stats = np.zeros((num_classes, len(fnames)))
    file_idx = 0
    pc_size = -1
    d = None

    for fname in fnames:
        fname_no_png = fname.split(".png")[0]
        fname_no_prefix = fname_no_png.split('/')[-1]
        scan_fname = data_dir + "scans/" + fname_no_prefix + ".tiff"
        label_fname = data_dir + "labels/" + fname_no_prefix + ".png"
        # print("currently loading labels and range images for scan: ", scan_fname)

        scan = cv2.imread(scan_fname, cv2.IMREAD_UNCHANGED)
        label = cv2.imread(label_fname)

        # all rays that do not have any return will be set to 0, and they are not considered during the back-propagation
        scan = np.nan_to_num(scan, copy=True, nan=0.0, posinf=None, neginf=None)
        # make sure the labels for those points belong to the "unlabeled" class, which is 0
        index = (scan.sum(axis=2) == 0)
        label_copy = label.copy()  # copy so the consistency check below compares against the original labels
        label_copy[index, :] = 0
        if (label.flatten().sum() != label_copy.flatten().sum()):
            raise Exception("some of the rays without returns have labels, this should not happen!")
        else:
            label = label_copy

        # convert label into expected values
        # Ian's label tool convention:
        # - unlabelled: [0, 0, 0] -- expected value is 0
        # - road: [0, 0, 1] -- expected value is 1
        # - vegetation: [0, 1, 0] -- expected value is 2
        # - building: [1, 0, 0] -- expected value is 3
        # - grass/sidewalk: [0, 0.4, 0] -- expected value is 4
        # - vehicle: [0, 1, 1] -- expected value is 5
        # - human: [1, 0, 1] -- expected value is 6
        # - gravel: [0, 0.5, 0.5] -- expected value is 7
        # - tree_trunk: [0.5, 0.2, 0.2] -- expected value is 8
        # - light_pole: [1, 0.5, 0] -- expected value is 9
        # label_converted = np.zeros((label.shape[0], label.shape[1]))
        label_converted = label[:, :, 0]

        convert_labels_into_specific_class = (for_jackle == False)
        if convert_labels_into_specific_class:
            print("converting labels into specific classes (e.g. only keeping car, light pole, trunk, road and background)")
            # input("Press Enter to confirm and continue, otherwise, kill the program and modify convert_images_to_labels_pcds.py ...")
            background_label = 0
            if for_indoor:
                road_label = 0
            else:
                road_label = 1

            # convert labels of non-car classes into background
            label_converted[label_converted == 0] = background_label
            label_converted[label_converted == 1] = road_label

            label_converted[label_converted == 2] = background_label
            print(f'converting points with vegetation labels into background labels')

            label_converted[label_converted == 3] = background_label
            print(f'converting points with building labels into background labels')

            label_converted[label_converted == 4] = road_label
            print(f'converting points with grass/sidewalk labels into road labels')

        # stats of number of points to address class imbalance issues
        num_pts = 0
        for i in np.arange(num_classes):
            stats[i, file_idx] = np.sum((label_converted).ravel() == i)
            num_pts += stats[i, file_idx]
        # sanity check: all points should be included in stats
        if (num_pts != label_converted.ravel().shape[0]):
            raise Exception("labels are not 0-9, invalid labels exist!!!")
        file_idx += 1


        # convert scan image into pcd data format
        # Pass xyz to Open3D.o3d.geometry.PointCloud and visualize
        # pcd = o3d.geometry.PointCloud()
        x = scan[:, :, 0].flatten()
        y = scan[:, :, 1].flatten()
        z = scan[:, :, 2].flatten()
        xyz = np.zeros((x.shape[0], 3))
        xyz[:, 0] = x
        xyz[:, 1] = y
        xyz[:, 2] = z
        mean = np.mean(xyz, axis=0)
        # print(mean)
        intensity = scan[:, :, 3].flatten()

        # faster approach
        pc = pypcd.make_xyz_point_cloud(xyz)
        pc.pc_data = pc.pc_data.flatten()
        # old_md = pc.get_metadata()
        # new_dt = [(f, pc.pc_data.dtype[f]) for f in pc.pc_data.dtype.fields]
        # new_data = [pc.pc_data[n] for n in pc.pc_data.dtype.names]
        md = {'fields': ['intensity'], 'count': [1], 'size': [4], 'type': ['F']}

        # Speed up by avoiding creating the recarray multiple times, which is very slow
        if pc_size == -1:
            # initialize
            pc_size = len(pc.pc_data)
            d = np.rec.fromarrays((np.random.random(len(pc.pc_data))))
        elif len(pc.pc_data) != pc_size:
            print("different size point cloud received, recreating numpy.recarray, which is very slow!")
            pc_size = len(pc.pc_data)
            d = np.rec.fromarrays((np.random.random(len(pc.pc_data))))
        # else:
        #     # continue using existing d
        #     print("point cloud size is the same, which is good!")

        try:
            newpc = pypcd.add_fields(pc, md, d)
        except:
            traceback.print_exc()
            raise Exception(colored("READ THIS: for this error, just comment out the two lines (should be line 443 and 444) in pypcd.py file!", 'green'))

        # new_md = newpc.get_metadata()
        # setting intensity data
        newpc.pc_data['intensity'] = intensity


        # save point cloud as pcd files in converted_scans folder
        point_cloud_final_fname = save_dir_point_cloud + "point_cloud" + "_" + str(fname_no_prefix) + ".pcd"
        newpc.save_pcd(point_cloud_final_fname, compression='binary_compressed')
        # print("pcds are saved in converted_scans folder!")
        # remove intermediate point cloud
        # os.remove(point_cloud_temp_fname)

        # save labels as a 1-d array in converted_labels folder
        label_converted = label_converted.ravel()

        np.save(save_dir_label + "label" + "_" + str(fname_no_prefix) + ".npy", label_converted)
        # print("labels are saved in converted_labels folder!")
        print("finished processing:", file_idx, "out of", len(fnames), "files")

    # print out the stats
    print("std values of number of points for each class are:")
    print(np.std(stats, axis=1))
    print("mean values of number of points for each class are:")
    print(np.mean(stats, axis=1))
    print("mean values of number of points for each class / total points are:")
    percent_array = np.mean(stats, axis=1) / np.sum(np.mean(stats, axis=1))
    print(percent_array)
    np.savetxt(data_dir + "class_points_divided_by_total_points.txt", percent_array, fmt='%.7f')

    print("total values of number of points for each class:")
    total_array = np.sum(stats, axis=1)
    print(total_array)



if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--data_dir", "-D", required=True, help="The main dataset directory where the labeled data is stored with the final '/' included")
    args = vars(ap.parse_args())

    for_jackle = False
    for_indoor = False
    convert_images_to_labels_pcds(for_jackle, for_indoor, args)
    print('convert_images_to_labels_pcds finished, starting training and test set split in 2 seconds...')
    time.sleep(2)
    train_test_split_save(for_jackle, for_indoor, args)
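
# Example (added sketch): verify the sequences/ split written above --
# 00 = train (clouds + labels), 01 = validation (clouds + labels),
# 02 = test (clouds only). save_dir is the same dataset root as above.
def print_split_summary(save_dir):
    for seq in ["00", "01", "02"]:
        clouds = len(glob.glob(save_dir + "sequences/" + seq + "/point_clouds/*.pcd"))
        labels = len(glob.glob(save_dir + "sequences/" + seq + "/labels/*.npy"))
        print("sequence", seq, ":", clouds, "clouds,", labels, "labels")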
--------------------------------------------------------------------------------
/diameter_estimation/process_tree_node.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# title :
# description :
# author :Ankit Prabhu

import rospy
from sensor_msgs.msg import PointCloud2, PointField
from std_msgs.msg import Header
import ros_numpy
import sys
import numpy as np
from sklearn.cluster import OPTICS, DBSCAN
import time
import open3d as o3d
import json
import webcolors
import matplotlib.pyplot as plt
import pandas as pd
import open3d.visualization.rendering as rendering
import csv
import os

# TODO 4) Test on other bags to see results
class ProcessTreeNode:

    def __init__(self):

        self.tree_cloud_sub = rospy.Subscriber(
            "/tree_cloud_world", PointCloud2, callback=self.tree_pc, queue_size=10)
        self.clus_tree_cloud_pub = rospy.Publisher(
            "/tree_inst", PointCloud2, queue_size=100)

        # Tunable Params
        # *****************************
        # Param Tuning Cases
        ####################################
        # When trees are close together:
        # Use conservative clustering for separation: init-[0.1, 10], control-[0.15, 25]
        # The issue with that is that it cannot measure up to a large height, especially for thin trees
        ######################################
        ######################################
        # When trees are far away (less noise):
        # init - [0.3, 20]
        # control - [(0.15, 15/20), (0.17, 20 -> seems to work better)]
        # For Tree 3 (very noisy): [init 0.1, 20; control 0.15, 33]
        ########################################
        # For noisy trees and split branching:
        # init_db - [0.1, 25/15], else default is [0.3, 75/50] (may miss top points, use for noisy spots)
        # control_db - [0.15, 20/15], else default [0.5, 50] (heuristic)
        # init_min_clus - 200 for noise, 500 for clean (low value as clusters are more clean)
        # control_min_clus - 400 or 500 (heuristic)
        # **********************************

        # DBSCAN cluster params
        self.valid_range_threshold = 10000

        # to find the core point (lenient)
        self.init_min_sample_threshold_dbscan = 30  # [20]
        self.init_epsilon_value_dbscan = 0.5  # [0.3]
        self.init_min_cluster_pts = 100  # throws out any cluster smaller than this (gets rid of false positives)

        # once you've found the core point (strict, to throw out false positives)
        self.control_pt_min_sample_dbscan = 80  # [20]
        self.control_pt_eps_dbscan = 0.2  # [0.17]
        self.control_pt_min_cluster_pts = 1000  # 400

        # post-processing of the json file
        self.step_size_for_control_pts = 10  # 15; how well you see green and red pts
        self.kdtree_search_radius = 1
        self.slash_height_thresh = 0.03
        self.number_to_round_to = 3
        self.per_tree_profile_height = 0.5
        self.min_slash_points_for_median_diameter = 2
        self.step_size_for_tree_diameters = 0.1

        self.dbh_percentile = 50
        self.max_dbh = 0.6  # throw out values greater than this
        self.control_pts_cluster_time = 220  # cluster accumulation period

        # Flags
        self.show_slashes = False  # show the red/green pts (to debug the init and control params)
        self.compute_tree_profile = True  # do the post-processing of DBH estimations
        self.viz_clustered_control_points = True  # click in vispy to see which cluster is which id
        self.plot_tree_profile = False
        self.save_stats_as_df = False
        self.save_stats_as_csv = False
        self.converted_indices_filename = None
        self.data_path = "data/"  # include the last slash

        # Load trajectory data from traj.txt
        self.traj_file = None

        self.pc_fields_ = make_fields()
        self.control_points_dict = {}
        self.cluster_points_dict = {}
        self.last_time_stamp = None
        # Permanently generate 500 colors for clusters
        self.perma_clus_color = self.gen_rand_colors(500)

    def tree_pc(self, msg):

        # Numpifying the point cloud msg
        cur_timestamp = msg.header.stamp
        if self.last_time_stamp is None:
            self.last_time_stamp = cur_timestamp

        tree_cloud_full = ros_numpy.numpify(msg)
        # Replacing NaNs
        x_coords = np.nan_to_num(tree_cloud_full['x'].flatten())
        y_coords = np.nan_to_num(tree_cloud_full['y'].flatten())
        z_coords = np.nan_to_num(tree_cloud_full['z'].flatten())
        # 10.0 as placeholder for NaNs present in intensities
        mod_intensities = np.nan_to_num(
            tree_cloud_full['intensity'].flatten(), nan=10.0)
        tree_cloud_full = np.zeros((x_coords.shape[0], 4))
        tree_cloud_full[:, 0] = x_coords
        tree_cloud_full[:, 1] = y_coords
        tree_cloud_full[:, 2] = z_coords
        tree_cloud_full[:, 3] = mod_intensities

        # Selecting only tree points
        tree_valid_idx = tree_cloud_full[:, 3] == 8
        tree_cloud = tree_cloud_full[tree_valid_idx, :]

        # Thresholding by range
        valid_range_idx = self.threshold_by_range(
            self.valid_range_threshold, tree_cloud)  # Param (Range Thresholding)
        tree_cloud = tree_cloud[valid_range_idx, :]

        # Clustering trees
        tree_labels = self.cluster_tree(
            pc_xyz=tree_cloud, min_samples=self.init_min_sample_threshold_dbscan, eps=self.init_epsilon_value_dbscan)  # Param (DBSCAN n and eps)

        clustered_tree_cloud = np.zeros_like(tree_cloud)
        clustered_tree_cloud[:, 0] = tree_cloud[:, 0]
        clustered_tree_cloud[:, 1] = tree_cloud[:, 1]
        clustered_tree_cloud[:, 2] = tree_cloud[:, 2]
        clustered_tree_cloud[:, 3] = tree_labels
        clean_clus_idx = clustered_tree_cloud[:, 3] != -1
        clustered_tree_cloud = clustered_tree_cloud[clean_clus_idx, :]

        self.publish_clustered_cloud(
            clustered_tree_cloud, cur_timestamp, "odom", self.clus_tree_cloud_pub)

        # Working on individual tree segments
        tree_instances = np.unique(clustered_tree_cloud[:, 3])
        for count, inst_idx in enumerate(tree_instances):

            cur_instant_pc_xyz = np.around(
                clustered_tree_cloud[clustered_tree_cloud[:, 3] == inst_idx, :3], self.number_to_round_to)

            if cur_instant_pc_xyz.shape[0] < self.init_min_cluster_pts:  # Param (Filter initial clusters to remove noisy clusters)
                continue

            o_pcd = o3d.geometry.PointCloud()
            o_pcd.points = o3d.utility.Vector3dVector(cur_instant_pc_xyz)
            o_pcd_tree = o3d.geometry.KDTreeFlann(o_pcd)

            o_pcd.paint_uniform_color([0.3, 0.3, 0.3])

            z_profile, z_profile_unique_idx = np.unique(
                cur_instant_pc_xyz[:, 2], return_index=True)
            z_profile_unique_idx_sorted = z_profile_unique_idx[np.argsort(
                z_profile)]

            z_profile_percentile_selected_idx = z_profile_unique_idx_sorted[np.arange(0, z_profile_unique_idx_sorted.shape[0], self.step_size_for_control_pts)]  # Param (Step size controls number of control points)
            # print("Number of z values is {} for inst-idx {} for cloud of shape {}".format(z_profile.shape[0], count+1, cur_instant_pc_xyz.shape[0]))
            # Querying the KD-Tree

            for i in range(z_profile_percentile_selected_idx.shape[0]):

                cur_control_point = cur_instant_pc_xyz[z_profile_percentile_selected_idx[i], :].reshape(-1, 1)
                [k, tree_idx, _] = o_pcd_tree.search_radius_vector_3d(cur_control_point, self.kdtree_search_radius)  # Param (Search radius to link points to control points)
                cur_tree_slash_points = cur_instant_pc_xyz[tree_idx]
                cur_tree_slash_filtered = cur_tree_slash_points[abs(cur_tree_slash_points[:, 2] - cur_control_point[2]) <= self.slash_height_thresh, :]  # Param (Controls tree ring formation)
                tree_idx_slash = np.asarray(tree_idx)[abs(cur_tree_slash_points[:, 2] - cur_control_point[2]) <= self.slash_height_thresh]  # Param (Controls tree ring formation)

                cur_control_point_diameter = self.calc_max_norm_values(cur_tree_slash_filtered)

                # initialize the list on first use, then always append so the
                # very first control point is not dropped
                if not self.control_points_dict:
                    self.control_points_dict["points"] = []
                self.control_points_dict["points"].append([cur_control_point.reshape(-1,)[0], cur_control_point.reshape(-1,)[1], cur_control_point.reshape(-1,)[2], cur_control_point_diameter])
                # print("For {} instance and control point {}, diameter is {}".format(count+1, i+1, cur_control_point_diameter))
                o_pcd.colors[z_profile_percentile_selected_idx[i]] = [1, 0, 0]
                np.asarray(o_pcd.colors)[tree_idx_slash[1:], :] = [0, 1, 0]
                # o3d.visualization.draw_geometries([o_pcd])
                # o_pcd.paint_uniform_color([0.3, 0.3, 0.3])

            # np.asarray(o_pcd.colors)[z_profile_percentile_selected_idx, :] = [1, 0, 0]
            if self.show_slashes:
                o3d.visualization.draw_geometries([o_pcd])

        # with open("control_points.json", "w") as ofile:
        #     json.dump(self.control_points_dict, ofile)

        if (cur_timestamp.to_sec() - self.last_time_stamp.to_sec()) > self.control_pts_cluster_time:  # Param (When to cluster accumulated control points)

            self.last_time_stamp = cur_timestamp
            self.cluster_control_points(visualize=self.viz_clustered_control_points)

    def cluster_centroid_check(self, centroid, cluster_centroids, threshold=1.0):
        """
        L2 distance check of cluster centroids.
        centroid is an np.array of shape (3,);
        cluster_centroids is a list of centroids.
        Check that the L2 distances are at least threshold meters apart.
        """
        # print("Checking centroid {}".format(centroid))
        if len(cluster_centroids) == 0:
            return True

        for cluster_centroid in cluster_centroids:
            distance = np.linalg.norm(centroid - cluster_centroid)
            if distance < threshold:
                print("Failed distance check between {} and {}".format(centroid, cluster_centroid))
                return False

        return True


    def cluster_control_points(self, visualize=True):

        self.cluster_points_dict = {}
        points_with_dia = np.array(self.control_points_dict["points"])  # 4th column is the diameter
        points_xyz = points_with_dia[:, :3]
        print("\n----------Control Point Clustering-------------\n")
        print("Num of full control points {}".format(points_xyz.shape[0]))

        if visualize:

            mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.5, origin=[0, 0, 0])
            o_pcd = o3d.geometry.PointCloud()
            o_pcd.points = o3d.utility.Vector3dVector(points_xyz)
            o_pcd.paint_uniform_color([0.0, 0.0, 0.0])
            # o3d.visualization.draw_geometries([o_pcd, mesh_frame])

        points_xyz_cluster_labels = self.cluster_tree(points_xyz, min_samples=self.control_pt_min_sample_dbscan, eps=self.control_pt_eps_dbscan, use_3d=True)  # Param (DBSCAN for control points)
        num_of_clusters = np.unique(points_xyz_cluster_labels)

        cluster_centroids = []
        filter_indices = np.array([])

        # if visualize:

        # if self.perma_clus_color.shape[0] == 0 or num_of_clusters.shape[0] self.max_dbh:
                    dbh_over_max += 1
                    dbh_dia = None
                else:
                    dbh_not_enough_pts += 1

                for cur_z_height in z_profile_heights:

                    cur_control_slash_idx = abs(cur_cluster_control_pts[:, 2] - cur_z_height) <= self.slash_height_thresh  # Param (height thresh for ring generation)

                    if cur_control_slash_idx.sum() < self.min_slash_points_for_median_diameter:  # Param (min number of slash points for the diameter to be medianed at that control point)

                        print("Not enough slash points ({}) at height {:.3f}m, so skipping this!!".format(cur_control_slash_idx.sum(), cur_z_height))
                    else:

                        cur_control_slash_pts = cur_cluster_control_pts[cur_control_slash_idx, :]
                        # Editing how the final diameter is estimated based on noise
                        # if cur_z_height <= 5.0:
                        #     cur_height_diameter_median = np.percentile(cur_control_slash_pts[:, 3], 75)
                        # else:
                        #     cur_height_diameter_median = np.percentile(cur_control_slash_pts[:, 3], 75)

                        # if abs(np.median(cur_control_slash_pts[:, 3]) - np.max(cur_control_slash_pts[:, 3])) > 0.1:
                        #     cur_height_diameter_median = np.percentile(cur_control_slash_pts[:, 3], 65)
                        # else:
                        #     cur_height_diameter_median = np.percentile(cur_control_slash_pts[:, 3], 75)

                        ######################################################################
                        # New method for calculating diameter
                        #######################################################################
                        cur_height_diameter_median = np.percentile(cur_control_slash_pts[:, 3], self.dbh_percentile)  # hard code to 85th percentile
                        if dbh_dia is not None:
                            if cur_height_diameter_median > dbh_dia[0]:

                                new_control_points = cur_control_slash_pts[cur_control_slash_pts[:, 3]= 0.1)
        # filter out points larger than valid_range_threshold
        valid_indices = np.logical_and(
            (range_values < valid_range_threshold), valid_indices)

        return valid_indices

    def calc_max_norm_values(self, vals):

        max_norm = 0

        for i in range(vals.shape[0]):

            diff_vec = vals - vals[i]
            max_dia = np.max(np.linalg.norm(diff_vec, axis=1))

            if max_dia > max_norm:

                max_norm = max_dia

        return max_norm

def make_fields():
    # manually add fields by Ian
    fields = []
    field = PointField()
    field.name = 'x'
    field.count = 1
    field.offset = 0
    field.datatype = PointField.FLOAT32
    fields.append(field)

    field = PointField()
    field.name = 'y'
    field.count = 1
    field.offset = 4
    field.datatype = PointField.FLOAT32
    fields.append(field)

    field = PointField()
    field.name = 'z'
    field.count = 1
    field.offset = 8
    field.datatype = PointField.FLOAT32
    fields.append(field)

    field = PointField()
    field.name = 'intensity'
    field.count = 1
    field.offset = 12
    field.datatype = PointField.FLOAT32
    fields.append(field)
    return fields

if __name__ == "__main__":

    rospy.init_node("tree_cloud_node")
    tree_node = ProcessTreeNode()
    while not rospy.is_shutdown():

        print("Tree Node Started")
        rospy.spin()

    if tree_node.compute_tree_profile:
        tree_node.gen_diameter_profile()
    print("Node Killed")
--------------------------------------------------------------------------------
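
`calc_max_norm_values` in `process_tree_node.py` estimates a ring's diameter as the maximum pairwise distance among its slash points, using an O(n²) Python loop. The sketch below shows the same max-chord computation vectorized; `scipy` is an assumed extra dependency here, not something the original node uses:

```
import numpy as np
from scipy.spatial.distance import pdist

def max_chord_diameter(points):
    """Maximum pairwise distance among the points of one tree ring (slash)."""
    if points.shape[0] < 2:
        return 0.0
    return float(pdist(points).max())

# Four points on a circle of diameter 0.3 m -> the max chord is ~0.3
theta = np.linspace(0, 2 * np.pi, 4, endpoint=False)
ring = np.c_[0.15 * np.cos(theta), 0.15 * np.sin(theta), np.zeros(4)]
print(max_chord_diameter(ring))  # ~0.3
```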