├── .gitignore ├── M3ED_banner.webp ├── README.md ├── build_system ├── README.md ├── bag_processing │ ├── hdf52imu_plot.py │ ├── hdf52media.bash │ ├── hdf52media.py │ ├── hdf52stripped.py │ ├── rosbag2hdf5.bash │ ├── rosbag2hdf5.py │ ├── rosbag2timecorrection.bash │ ├── rosbag2timecorrection.py │ ├── rosbag2verify.bash │ └── rosbag2verify.py ├── calibration │ ├── aprilgrid.yaml │ ├── event_bag_convert │ │ ├── CMakeLists.txt │ │ ├── include │ │ │ └── event_bag_convert │ │ │ │ └── prettyprint.hpp │ │ ├── package.xml │ │ └── src │ │ │ └── bag_to_all_frames.cpp │ ├── kalibr_all │ ├── kalibr_events │ ├── kalibr_ovc │ ├── only_ovc_calib.py │ ├── rosbag2camcalibration.bash │ ├── rosbag2imucalibration.bash │ └── vn100.yaml ├── dataset_paths.py ├── docker │ ├── Dockerfile │ └── docker_checks.sh ├── jenkins │ ├── Jenkinsfile_CPU_stage1 │ ├── Jenkinsfile_CPU_stage2 │ ├── Jenkinsfile_CPU_stage3 │ ├── Jenkinsfile_GPU │ ├── Jenkinsfile_dispatcher │ └── Jenkinsfile_docker_build ├── lidar_depth │ ├── concatenate_pcd_uncompressed │ │ ├── CMakeLists.txt │ │ ├── package.xml │ │ └── src │ │ │ └── pcl_concatenate_points_pcd.cpp │ ├── fasterlio_gt.py │ ├── fasterlio_plot.py │ ├── gt2evo.bash │ ├── gt2evo.py │ ├── gt2media.bash │ ├── gt2media.py │ ├── gt2verify.bash │ ├── gt2verify.py │ ├── lidar_calib.py │ ├── long_range_ouster64.yaml │ ├── ouster_bag_convert │ │ ├── CMakeLists.txt │ │ ├── package.xml │ │ └── src │ │ │ └── bag_rewriter.cpp │ ├── pcd2gt.bash │ ├── rosbag2oustermetadata.py │ ├── rosbag2pcd.bash │ ├── short_range_ouster64.yaml │ └── util.py ├── preamble.bash ├── semantics │ ├── hdf52internimage.bash │ ├── internimage.py │ ├── internimage2media.bash │ └── internimage2media.py └── stats_and_summary │ ├── dataset_statistics.py │ └── getStats.bash ├── dataset_list.yaml ├── event_loading_by_ms.py ├── image_and_event_loading.py ├── lidar_loading.py └── tools └── download_m3ed.py /.gitignore: -------------------------------------------------------------------------------- 1 | utils/ 2 | __pycache__ 3 | Log/ 4 | 5 | tools/dataset_lists.yaml 6 | tools/output 7 | -------------------------------------------------------------------------------- /M3ED_banner.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniilidis-group/m3ed/df739f20fba41ac6da8c22f4260c305875e391ed/M3ED_banner.webp -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![alt text](https://github.com/daniilidis-group/m3ed/blob/main/M3ED_banner.webp) 2 | 3 | # M3ED 4 | 5 | ## Intro 6 | 7 | M3ED provides high-quality synchronized and labeled data from multiple platforms, including wheeled ground vehicles, legged robots, and aerial robots, operating in challenging conditions such as driving along off-road trails, navigating through dense forests, and executing aggressive flight maneuvers. 8 | 9 | M3ED processed data, raw data, and code are available to 10 | [download](https://m3ed.io/download/). For more information about our dataset, 11 | please visit [https://m3ed.io](https://m3ed.io/). 12 | 13 | ## Sample Loading 14 | 15 | M3ED provides helper datasets within the HDF5 files to more easily load synchronized data. 
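A minimal sketch of what these helper datasets provide, ahead of the ready-made scripts below (dataset paths follow the ones used throughout `build_system`; the file name, frame index, and microsecond units are assumptions to check against your download):
```
import h5py

idx = 500                                            # frame index (illustrative)
with h5py.File("data.h5", "r") as f:                 # placeholder file name
    ts = f["/ovc/ts"][:]                             # image timestamps (us)
    rgb = f["/ovc/rgb/data"][idx]                    # RGB frame at index idx
    ev_idx = f["/ovc/ts_map_prophesee_left_t"][:]    # frame -> event-index map
    s, e = ev_idx[idx], ev_idx[idx + 1]              # events bracketing this frame
    x = f["/prophesee/left/x"][s:e]                  # event coordinates
    y = f["/prophesee/left/y"][s:e]
    p = f["/prophesee/left/p"][s:e]                  # event polarities
    print(f"frame at t={ts[idx]}us has {e - s} left-camera events")
```
The `ts_map_prophesee_*` datasets store one event index per OVC frame, so consecutive entries bracket the events belonging to one frame interval.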
16 | 
17 | For loading synchronized images:
18 | ```
19 | python3 image_and_event_loading.py --data_h5 <data.h5> --idx 500 --n_events 200000
20 | ```
21 | 
22 | For loading events in 1ms chunks:
23 | ```
24 | python3 event_loading_by_ms.py --data_h5 <data.h5> --ms 1000
25 | ```
26 | 
--------------------------------------------------------------------------------
/build_system/README.md:
--------------------------------------------------------------------------------
 1 | # M3ED Processing Scripts
 2 | 
 3 | This repo contains the scripts used to process the M3ED data. These scripts are
 4 | run automatically in Jenkins, but you may run them in a Docker container built
 5 | from the provided `Dockerfile` (see the build/run sketch after this README).
 6 | 
 7 | 
 8 | ## Jenkins build architecture
 9 | 
10 | Jenkins was used as our deployment platform. Jenkinsfiles are inside
11 | `build_system/jenkins`.
12 | 
13 | There are six Jenkins pipelines:
14 | 
15 | - Dispatcher: this pipeline gathers all the input files and orchestrates the
16 |   execution of the work among the different nodes.
17 | 
18 | - Docker-build: this pipeline builds the docker image **in each node**. This
19 |   ensures that the processing pipelines have the most up-to-date docker image.
20 | 
21 | - CPU Stage 1: this pipeline runs on all bags and generates the time
22 |   synchronization files, the h5 output files, and the ground truth files. For
23 |   calibration bags, this pipeline runs Kalibr.
24 | 
25 | - CPU Stage 2: this pipeline generates videos and media from the processed
26 |   files.
27 | 
28 | - GPU Stage: this pipeline runs all the GPU-related tasks, such as InternImage.
29 | 
30 | - CPU Stage 3: generates reports and tests on the existing data.
31 | 
32 | ## Scripts
33 | 
34 | ### Calibration
35 | 
36 | Scripts to run camera and IMU calibration. `event_bag_convert` is a package that
37 | generates integrated images from the events to run camera calibration.
38 | 
39 | ### Bag Processing
40 | 
41 | - `bag_processing/rosbag2verify.py`: verifies bag timestamps and integrity.
42 | 
43 | - `bag_processing/rosbag2timecorrection.py`: generates the time correction files
44 |   to synchronize all streams.
45 | 
46 | - `bag_processing/rosbag2hdf5.py`: converts the bag data into HDF5.
47 | 
48 | - `bag_processing/hdf52media.py`: generates media output from the HDF5 (videos, plots, images).
49 | 
50 | - `bag_processing/hdf52stripped.py`: strips the test bags (drops the Ouster and OVC RGB groups).
51 | 
52 | ### Lidar and Depth
53 | 
54 | Scripts to run FasterLIO on the data and generate depth and pose outputs.
55 | 
56 | ### Semantics
57 | 
58 | Scripts to run InternImage and generate semantic outputs of the images.
59 | 
60 | ### Stats and Summary
61 | 
62 | Scripts to generate statistics and summaries from the different files.
63 | 
64 | ## License
65 | M3ED is released under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).
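For reference, a minimal sketch of building and entering that container; the `user_id`/`user_name` build args match the `ARG`s declared in `build_system/docker/Dockerfile`, while the image tag and mount point are arbitrary choices here:
```
# Build the image so the in-container user matches your host user
# (the Dockerfile declares user_id/user_name ARGs for this purpose).
docker build -t m3ed-build \
    --build-arg user_id="$(id -u)" \
    --build-arg user_name="$(whoami)" \
    build_system/docker

# Enter the container with the repo mounted, then run the build_system scripts.
docker run --rm -it -v "$PWD":/m3ed -w /m3ed m3ed-build bash
```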
66 | 
--------------------------------------------------------------------------------
/build_system/bag_processing/hdf52imu_plot.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import pdb
 3 | import argparse
 4 | import sys
 5 | import numpy as np
 6 | import h5py
 7 | import matplotlib.pyplot as plt
 8 | 
 9 | from scipy.interpolate import splev, splrep, barycentric_interpolate
10 | from scipy.spatial.transform import Rotation
11 | from scipy import signal
12 | 
13 | def resample_imu(imu, imu_ts, sample_times):
14 |     spl = splrep(imu_ts, imu)
15 |     return splev( sample_times, spl)
16 | 
17 | def filter_imu_sample(imu, numtaps=7, f=0.10):
18 |     coeffs = signal.firwin(numtaps, f)
19 |     filtered = np.zeros(imu.shape)
20 |     filtered[:,0] = signal.lfilter(coeffs, 1.0, imu[:,0])
21 |     filtered[:,1] = signal.lfilter(coeffs, 1.0, imu[:,1])
22 |     filtered[:,2] = signal.lfilter(coeffs, 1.0, imu[:,2])
23 |     return filtered
24 | 
25 | def align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel):
26 |     """
27 |     Solving ouster_R_ovc * ovc_omega = ouster_omega
28 |     """
29 |     ovc_measurements = np.concatenate( [ovc_omega] ).T
30 |     ouster_measurements = np.concatenate( [ouster_omega] ).T
31 | 
32 |     ouster_R_ovc = (ouster_measurements @ np.linalg.pinv(ovc_measurements))
33 |     U,S,Vh = np.linalg.svd(ouster_R_ovc)
34 |     ouster_R_ovc = U@Vh
35 |     ouster_R_ovc[:,0] = -ouster_R_ovc[:,0]
36 |     ouster_R_ovc[:,2] = np.cross(ouster_R_ovc[:,0],ouster_R_ovc[:,1])
37 | 
38 |     assert np.all(np.isclose( np.linalg.det(ouster_R_ovc), 1.0 ))
39 | 
40 |     return ouster_R_ovc
41 | 
42 | 
43 | if __name__ == "__main__":
44 |     parser = argparse.ArgumentParser(description="Plots and aligns the OVC and Ouster IMU data from an h5 file.")
45 |     # input h5 file
46 |     parser.add_argument("--h5fn", help="The h5 file to plot.",
47 |                         required=True)
48 |     args = parser.parse_args()
49 | 
50 |     if not os.path.isfile(args.h5fn):
51 |         sys.exit("The input h5 file %s does not exist."
% args.h5fn) 52 | 53 | h5f = h5py.File(args.h5fn, 'r') 54 | 55 | ovc_mask = np.isfinite(h5f['/ovc/imu/accel'][:,0]) 56 | ovc_ts = h5f['/ovc/imu/ts'][...][ovc_mask] 57 | ovc_accel = h5f['/ovc/imu/accel'][...][ovc_mask,:] 58 | ovc_omega = h5f['/ovc/imu/omega'][...][ovc_mask,:] 59 | 60 | ouster_mask = np.isfinite(h5f['/ouster/imu/accel'][1000:-1000,0]) 61 | ouster_ts = h5f['/ouster/imu/ts'][1000:-1000][ouster_mask] 62 | ouster_accel = h5f['/ouster/imu/accel'][1000:-1000][ouster_mask,:] 63 | ouster_omega = h5f['/ouster/imu/omega'][1000:-1000][ouster_mask,:] 64 | 65 | ovc_resampled_omega = np.stack( [resample_imu(ovc_omega[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 ) 66 | ovc_resampled_accel = np.stack( [resample_imu(ovc_accel[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 ) 67 | 68 | ouster_accel = filter_imu_sample(ouster_accel) 69 | ouster_omega = filter_imu_sample(ouster_omega) 70 | 71 | ovc_omega = filter_imu_sample(ovc_resampled_omega) 72 | ovc_accel = filter_imu_sample(ovc_resampled_accel) 73 | ovc_ts = ouster_ts 74 | 75 | ouster_R_ovc = align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel) 76 | 77 | print(ouster_R_ovc) 78 | print(np.rad2deg(Rotation.from_matrix(ouster_R_ovc).as_euler('xyz'))) 79 | print(np.linalg.det(ouster_R_ovc)) 80 | 81 | transformed_ovc_omega = (ouster_R_ovc @ ovc_omega.T).T 82 | transformed_ovc_accel = (ouster_R_ovc @ ovc_accel.T).T 83 | 84 | fig, axes = plt.subplots(3,2,sharex=True,sharey=True) 85 | 86 | axes[0,0].set_title('OVC gyro') 87 | axes[0,0].plot( ovc_ts, transformed_ovc_omega[:,0] ) 88 | axes[1,0].plot( ovc_ts, transformed_ovc_omega[:,1] ) 89 | axes[2,0].plot( ovc_ts, transformed_ovc_omega[:,2] ) 90 | 91 | axes[0,1].set_title('Ouster gyro') 92 | axes[0,1].plot( ouster_ts[:-1], ouster_omega[:-1,0] ) 93 | axes[1,1].plot( ouster_ts[:-1], ouster_omega[:-1,1] ) 94 | axes[2,1].plot( ouster_ts[:-1], ouster_omega[:-1,2] ) 95 | 96 | fig, axes = plt.subplots(3,2,sharex=True,sharey=True) 97 | 98 | axes[0,0].set_title('OVC accel') 99 | axes[0,0].plot( ovc_ts, transformed_ovc_accel[:,0] ) 100 | axes[1,0].plot( ovc_ts, transformed_ovc_accel[:,1] ) 101 | axes[2,0].plot( ovc_ts, transformed_ovc_accel[:,2] ) 102 | 103 | axes[0,1].set_title('Ouster accel') 104 | axes[0,1].plot( ouster_ts[:-1], ouster_accel[:-1,0] ) 105 | axes[1,1].plot( ouster_ts[:-1], ouster_accel[:-1,1] ) 106 | axes[2,1].plot( ouster_ts[:-1], ouster_accel[:-1,2] ) 107 | 108 | plt.figure() 109 | plt.plot(ovc_ts, transformed_ovc_omega[:,0] ) 110 | plt.plot( ouster_ts, ouster_omega[:,0]) 111 | 112 | plt.figure() 113 | plt.plot(ovc_ts, transformed_ovc_omega[:,1] ) 114 | plt.plot( ouster_ts, ouster_omega[:,1]) 115 | 116 | plt.figure() 117 | plt.plot(ovc_ts, transformed_ovc_omega[:,2] ) 118 | plt.plot( ouster_ts, ouster_omega[:,2]) 119 | 120 | plt.show() 121 | -------------------------------------------------------------------------------- /build_system/bag_processing/hdf52media.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./build_system/preamble.bash 4 | 5 | output_files=("$EVENTS_VIDEO_RAW" "$RGB_VIDEO_RAW") 6 | check_files output_files 7 | check_free_space fixed 8 | 9 | # create tmp variables for videos 10 | EVENTS_VIDEO_RAW_TMP="${EVENTS_VIDEO_RAW%.*}_tmp.${EVENTS_VIDEO_RAW##*.}" 11 | RGB_VIDEO_RAW_TMP="${RGB_VIDEO_RAW%.*}_tmp.${RGB_VIDEO_RAW##*.}" 12 | 13 | # Run bag processing 14 | echo -e "${BLUE}Generating raw videos${NC}" 15 | python3 build_system/bag_processing/hdf52media.py \ 16 | --h5fn "$H5_PATH" \ 17 
| --out_events_gray "$EVENTS_VIDEO_RAW_TMP" \ 18 | --out_rgb "$RGB_VIDEO_RAW_TMP" 19 | if [ $? -ne 0 ]; then 20 | echo -e "${RED}Error creating media files for $H5_PATH${NC}" 21 | rm "$EVENTS_VIDEO_RAW_TMP" "$RGB_VIDEO_RAW_TMP" 22 | exit 1 23 | fi 24 | echo -e "${GREEN}Raw videos finished${NC}" 25 | mv "$EVENTS_VIDEO_RAW_TMP" "$EVENTS_VIDEO_RAW" 26 | mv "$RGB_VIDEO_RAW_TMP" "$RGB_VIDEO_RAW" 27 | 28 | # Compress videos 29 | echo -e "${BLUE}Generating compressed videos${NC}" 30 | compress_with_ffmpeg "$EVENTS_VIDEO_RAW" 31 | compress_with_ffmpeg "$RGB_VIDEO_RAW" 32 | echo -e "${GREEN}Compressed videos finished${NC}" 33 | -------------------------------------------------------------------------------- /build_system/bag_processing/hdf52media.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import os 4 | import h5py 5 | import numpy as np 6 | import cv2 7 | import sys 8 | import pdb 9 | from colorama import Fore, Style 10 | 11 | # Hardcoded topic names from the h5 12 | OVC_LEFT_TOPIC = ["ovc", "left", "data"] 13 | OVC_RIGHT_TOPIC = ["ovc", "right", "data"] 14 | OVC_RGB_TOPIC = ["ovc", "rgb", "data"] 15 | OVC_TS_TOPIC = ["ovc", "ts"] 16 | OVC_MAP_LEFT = ["ovc", "ts_map_prophesee_left_t"] 17 | OVC_MAP_RIGHT = ["ovc", "ts_map_prophesee_right_t"] 18 | PROPHESEE_LEFT_TOPIC = ["prophesee", "left"] 19 | PROPHESEE_RIGHT_TOPIC = ["prophesee", "right"] 20 | 21 | 22 | def generate_rgb_gray_vid(h5, output_file, verbose=True): 23 | print("SYNC RGB/GRAY Video Generation") 24 | 25 | # Filename is built based on the list of topics 26 | # TODO: change this for other video topics 27 | filename = "rgb_gray_" 28 | ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]][:] 29 | img_rgb = h5[OVC_RGB_TOPIC[0]][OVC_RGB_TOPIC[1]][OVC_RGB_TOPIC[2]] 30 | img_left = h5[OVC_LEFT_TOPIC[0]][OVC_LEFT_TOPIC[1]][OVC_LEFT_TOPIC[2]] 31 | img_right = h5[OVC_RIGHT_TOPIC[0]][OVC_RIGHT_TOPIC[1]][OVC_RIGHT_TOPIC[2]] 32 | 33 | # Get image properties from data 34 | img_w = img_rgb[0].shape[1] 35 | img_h = img_rgb[0].shape[0] 36 | 37 | # Get output image shape 38 | out_w = img_w * 3 39 | out_h = img_h 40 | fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) 41 | 42 | # Create VideoWriter and iterate over the frames 43 | fourcc = cv2.VideoWriter_fourcc(*"FFV1") 44 | video = cv2.VideoWriter(output_file, fourcc, 45 | fps, (out_w, out_h), True) 46 | 47 | # Loop over the timestamps 48 | for n, _ in enumerate(ts): 49 | if verbose: 50 | print(f"RGB - Processing frame {n}/{len(ts)}", end="\r") 51 | write_frame = np.zeros((out_h,out_w,3) ,dtype=img_left[n,...].dtype) 52 | write_frame[:,0:img_w,:] = img_left[n, ...] 53 | write_frame[:,img_w:img_w*2,:] = img_rgb[n, ...] 54 | write_frame[:,img_w*2:img_w*3,:] = img_right[n, ...] 
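# Canvas layout: [left mono | RGB | right mono]; assumes all three OVC
# imagers share the same resolution, so each takes one img_w-wide slot.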
55 | 56 | video.write(write_frame) 57 | 58 | video.release() 59 | os.chmod(output_file, 0o666) 60 | 61 | 62 | def generate_rgb_vid(h5, output_file, verbose=True): 63 | print("RGB Video Generation") 64 | 65 | # Filename is built based on the list of topics 66 | # TODO: change this for other video topics 67 | filename = "rgb_" 68 | ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]][:] 69 | img = h5[OVC_RGB_TOPIC[0]][OVC_RGB_TOPIC[1]][OVC_RGB_TOPIC[2]] 70 | 71 | # Get image properties from data 72 | img_w = img[0].shape[1] 73 | img_h = img[0].shape[0] 74 | 75 | # Get output image shape 76 | out_w = img_w 77 | out_h = img_h 78 | fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) 79 | 80 | # Create VideoWriter and iterate over the frames 81 | fourcc = cv2.VideoWriter_fourcc(*"FFV1") 82 | video = cv2.VideoWriter(output_file, fourcc, 83 | fps, (out_w, out_h), True) 84 | 85 | # Loop over the timestamps 86 | for n, _ in enumerate(ts): 87 | if verbose: 88 | print(f"RGB - Processing frame {n}/{len(ts)}", end="\r") 89 | write_frame = img[n, ...] 90 | video.write(write_frame) 91 | video.release() 92 | os.chmod(output_file, 0o666) 93 | 94 | 95 | def generate_synchronized_vid(h5, output_file, verbose, events=True, stereo=True): 96 | print("Synchronized Video Generation") 97 | # imgs has the pointrs to the hdf5 datasets 98 | imgs = [] 99 | ev_datasets = [] 100 | 101 | # Filename is built based on the list of topics 102 | # TODO: change this for other video topics 103 | filename = "imgs_" 104 | 105 | # Check that we have all the topics we need for this 106 | imgs.append(h5[OVC_LEFT_TOPIC[0]][OVC_LEFT_TOPIC[1]][OVC_LEFT_TOPIC[2]]) 107 | imgs.append(h5[OVC_RIGHT_TOPIC[0]][OVC_RIGHT_TOPIC[1]][OVC_RIGHT_TOPIC[2]]) 108 | ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]] 109 | if events: 110 | map_left = h5[OVC_MAP_LEFT[0]][OVC_MAP_LEFT[1]] 111 | map_right = h5[OVC_MAP_RIGHT[0]][OVC_MAP_RIGHT[1]] 112 | ev_datasets.append(h5[PROPHESEE_LEFT_TOPIC[0]][PROPHESEE_LEFT_TOPIC[1]]) 113 | ev_datasets.append(h5[PROPHESEE_RIGHT_TOPIC[0]][PROPHESEE_RIGHT_TOPIC[1]]) 114 | 115 | # Get image properties from data 116 | img_w = imgs[0].shape[2] 117 | img_h = imgs[0].shape[1] 118 | 119 | # Get output image shape 120 | dvs_width = 1280 121 | dvs_height = 720 122 | border = 10 123 | out_w = img_w * 2 + border 124 | if events: 125 | assert img_w == dvs_width 126 | out_h = img_h + dvs_height + border 127 | else: 128 | out_h = img_h 129 | fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) 130 | 131 | # Create VideoWriter and iterate over the frames 132 | fourcc = cv2.VideoWriter_fourcc(*"FFV1") 133 | video = cv2.VideoWriter(output_file, fourcc, 134 | fps, (out_w, out_h), True) 135 | 136 | # Do not iterate over the last frame to avoid index problems 137 | for n, _ in enumerate(ts[:-1]): 138 | if verbose: 139 | print(f"Synchronized - Processing frame {n}/{len(ts)}", end="\r") 140 | write_frame = None 141 | for j in range(len(imgs)): 142 | # OVC Topics 143 | # Find the index of the closest timestamp 144 | out_img = imgs[j][n, ...] 
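# The OVC streams are stored frame-aligned in the h5, so the same index n
# picks the sample taken at ts[n] for every imager.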
145 | 146 | # The images are monocrome 147 | out_img = np.dstack((out_img, out_img, out_img)) 148 | 149 | if j == 0: 150 | write_frame = out_img 151 | else: 152 | write_frame = np.hstack((write_frame, 153 | np.zeros((img_h, border, 3), 154 | dtype=np.uint8), 155 | out_img)) 156 | 157 | # Generate the frame with the event camera 158 | if events: 159 | x_l = ev_datasets[0]["x"][map_left[n]:map_left[n+1]] 160 | y_l = ev_datasets[0]["y"][map_left[n]:map_left[n+1]] 161 | p_l = ev_datasets[0]["p"][map_left[n]:map_left[n+1]] 162 | x_r = ev_datasets[1]["x"][map_right[n]:map_right[n+1]] 163 | y_r = ev_datasets[1]["y"][map_right[n]:map_right[n+1]] 164 | p_r = ev_datasets[1]["p"][map_right[n]:map_right[n+1]] 165 | 166 | p_l_pos = (p_l[np.newaxis] > 0).astype(np.uint8)*255 167 | p_l_neg = (p_l[np.newaxis] == 0).astype(np.uint8)*255 168 | p_r_pos = (p_r[np.newaxis] > 0).astype(np.uint8)*255 169 | p_r_neg = (p_r[np.newaxis] == 0).astype(np.uint8)*255 170 | 171 | ev_frame_l = np.zeros((dvs_height, dvs_width, 3), dtype=np.uint8) 172 | ev_frame_l[y_l, x_l, :] = np.vstack((p_l_pos, 173 | np.zeros_like(p_l_pos), 174 | p_l_neg)).T 175 | ev_frame_r = np.zeros((dvs_height, dvs_width, 3), dtype=np.uint8) 176 | ev_frame_r[y_r, x_r, :] = np.vstack((p_r_pos, 177 | np.zeros_like(p_r_pos), 178 | p_r_neg)).T 179 | 180 | ev_f = np.hstack((ev_frame_l, 181 | np.zeros((dvs_height, border, 3), dtype=np.uint8), 182 | ev_frame_r)) 183 | 184 | write_frame = np.vstack((write_frame, 185 | np.zeros((border, dvs_width*2 + border, 3), 186 | dtype=np.uint8), 187 | ev_f)) 188 | video.write(write_frame) 189 | video.release() 190 | os.chmod(output_file, 0o666) 191 | 192 | 193 | if __name__ == "__main__": 194 | parser = argparse.ArgumentParser(description="Creates vids and imgs from h5.") 195 | # input h5 file 196 | parser.add_argument("--h5fn", help="The h5 file to convert.", 197 | required=True) 198 | # output folder where we should store the results 199 | parser.add_argument("--out_events_gray", 200 | help="The output file to store the events/gray video. If not set, no events/gray video will be generated") 201 | parser.add_argument("--no-events", action="store_false", dest="events", 202 | help="Do not add events in out_events_gray.") 203 | parser.add_argument("--out_rgb", required=True, 204 | help="The output file to store the rgb video. If not set, no rgb video will be generated") 205 | parser.add_argument("--out_all_ovc", action="store_true", 206 | help="Output the synced video of all OVC sensors. If not set, no video will be generated.") 207 | parser.add_argument("--verbose", action="store_true", dest="verbose", 208 | help="Be verbose.") 209 | args = parser.parse_args() 210 | # Check that the file exists 211 | if not os.path.isfile(args.h5fn): 212 | sys.exit("The input h5 file %s does not exist." % args.h5fn) 213 | 214 | print("Converting %s to videos." 
% args.h5fn) 215 | 216 | # Get output folder name from input file 217 | verbose = args.verbose 218 | 219 | # Open h5 file and run the generations 220 | with h5py.File(args.h5fn, 'r') as h5: 221 | # generate_plots_imu(h5, output_folder, [h5, output_folder, "data"]) 222 | if (not ("calib" in args.h5fn and "lidar" in args.h5fn)): 223 | if args.out_events_gray is not None: 224 | # print in blue using colorama 225 | print(Fore.BLUE + "Generating events/gray video" + Style.RESET_ALL) 226 | generate_synchronized_vid(h5, args.out_events_gray, 227 | verbose, args.events) 228 | if args.out_rgb is not None: 229 | generate_rgb_vid(h5, args.out_rgb, verbose) 230 | print(Fore.BLUE + "Generating RGB video" + Style.RESET_ALL) 231 | if args.out_all_ovc: 232 | generate_rgb_gray_vid(h5, args.out_events_gray, verbose) 233 | print(Fore.BLUE + "Generating all OVC video" + Style.RESET_ALL) 234 | -------------------------------------------------------------------------------- /build_system/bag_processing/hdf52stripped.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import h5py 4 | import os 5 | import sys 6 | import pdb 7 | import subprocess 8 | from datetime import datetime 9 | from colorama import Fore, Back, Style 10 | 11 | 12 | def copy_datasets(source_file, destination_file): 13 | with h5py.File(source_file, 'r') as source_h5, h5py.File(destination_file, 'w') as destination_h5: 14 | def copy_group(group, parent_group, parent_key): 15 | for key in group.keys(): 16 | if key == "ouster": 17 | continue 18 | if parent_key == "ovc" and key == "rgb": 19 | continue 20 | item = group[key] 21 | if isinstance(item, h5py.Dataset): 22 | # Get compression settings from the source dataset 23 | compression = item.compression 24 | compression_opts = item.compression_opts 25 | shuffle = item.shuffle 26 | 27 | # Create the destination dataset with the same data and compression settings 28 | parent_group.create_dataset(key, data=item[...], 29 | compression=compression, 30 | compression_opts=compression_opts, 31 | shuffle=shuffle) 32 | elif isinstance(item, h5py.Group): 33 | print(Fore.BLUE + f"Copying {parent_key}/{key}..." 
+ Style.RESET_ALL)
34 |                     new_group = parent_group.create_group(key)
35 |                     copy_group(item, new_group, key)
36 |                 else:
37 |                     sys.exit("Unknown type")
38 | 
39 |         copy_group(source_h5, destination_h5, "")
40 |         # Add metadata
41 |         version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip()
42 |         destination_h5.attrs["version"] = version
43 |         destination_h5.attrs["creation_date"] = str(datetime.now())
44 | 
45 | 
46 | if __name__ == "__main__":
47 |     import argparse
48 |     parser = argparse.ArgumentParser()
49 |     parser.add_argument("--source", help="Source HDF5 file",
50 |                         required=True)
51 |     parser.add_argument("--destination", help="Destination HDF5 file",
52 |                         required=True)
53 |     args = parser.parse_args()
54 | 
55 |     # Check that the source file exists
56 |     if not os.path.isfile(args.source):
57 |         exit("Source file does not exist")
58 | 
59 |     copy_datasets(args.source, args.destination)
60 | 
--------------------------------------------------------------------------------
/build_system/bag_processing/rosbag2hdf5.bash:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | source ./build_system/preamble.bash
 3 | 
 4 | # Output files: H5_PATH, plus STRIPPED_H5_PATH for test bags
 5 | if [ "$IS_TEST" -eq 1 ]; then
 6 |     output_files=("$H5_PATH" "$STRIPPED_H5_PATH")
 7 | else
 8 |     output_files=("$H5_PATH")
 9 | fi
10 | 
11 | check_files output_files
12 | check_free_space bag_multiple
13 | 
14 | H5_PATH_TMP="$H5_PATH.tmp"
15 | 
16 | # Run bag processing
17 | echo -e "${BLUE}Converting bag to hdf5${NC}"
18 | python3 build_system/bag_processing/rosbag2hdf5.py --bag "$BAG_PATH" \
19 |     --h5fn "$H5_PATH_TMP" \
20 |     --offset "$TIME_CORRECTIONS_PATH" \
21 |     --camchain "$CALIB_CAMCHAIN_PATH" \
22 |     --lidar_calib "$CALIB_LIDAR_PATH" \
23 |     --imu_chain "$CALIB_IMU_PATH"
24 | if [ $? -ne 0 ]; then
25 |     echo -e "${RED}Error converting bag to hdf5${NC}"
26 |     exit 1
27 | fi
28 | echo -e "${GREEN}Done converting bag to hdf5${NC}"
29 | 
30 | # Move the temporary file to the final location
31 | chmod 666 "$H5_PATH_TMP"
32 | mv "$H5_PATH_TMP" "$H5_PATH"
33 | 
34 | # Get the bag and HDF5 sizes in KB
35 | BAG_FILE_SIZE_KB=$(du -k "$BAG_PATH" | cut -f1)
36 | H5_FILE_SIZE=$(du -k "$H5_PATH" | cut -f1)
37 | 
38 | echo -e "${YELLOW}Bag file size in KB: $BAG_FILE_SIZE_KB${NC}"
39 | echo -e "${YELLOW}H5 file size in KB: $H5_FILE_SIZE${NC}"
40 | 
41 | # Calculate and print the ratio between file sizes
42 | echo -e "${GREEN}Ratio: ${BOLD}$(echo "scale=2; $H5_FILE_SIZE / $BAG_FILE_SIZE_KB" | bc)${NC}"
43 | 
44 | # check if IS_TEST is 1, and strip the output h5 if so
45 | if [ "$IS_TEST" -eq 1 ]; then
46 |     echo -e "${BLUE}Stripping output h5${NC}"
47 |     STRIPPED_H5_PATH_TMP="$STRIPPED_H5_PATH.tmp"
48 |     python3 build_system/bag_processing/hdf52stripped.py \
49 |         --source "$H5_PATH" --destination "$STRIPPED_H5_PATH_TMP"
50 |     if [ $?
-ne 0 ]; then
51 |         echo -e "${RED}Error stripping hdf5${NC}"
52 |         exit 1
53 |     fi
54 |     echo -e "${GREEN}Done stripping h5${NC}"
55 |     chmod 666 "$STRIPPED_H5_PATH_TMP"
56 |     mv "$STRIPPED_H5_PATH_TMP" "$STRIPPED_H5_PATH"
57 | fi
58 | 
--------------------------------------------------------------------------------
/build_system/bag_processing/rosbag2timecorrection.bash:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | source ./build_system/preamble.bash
 3 | 
 4 | # TIME_CORRECTIONS_PATH is the only output file in the array
 5 | output_files=("$TIME_CORRECTIONS_PATH")
 6 | check_files output_files
 7 | 
 8 | 
 9 | # Run bag processing
10 | echo -e "${BLUE}Extracting time corrections from $BAG_NAME${NC}"
11 | python3 build_system/bag_processing/rosbag2timecorrection.py --bag "$BAG_PATH" --time_fn "$TIME_CORRECTIONS_PATH.tmp"
12 | if [ $? -ne 0 ]; then
13 |     echo -e "${RED}Error extracting time corrections${NC}"
14 |     exit 1
15 | fi
16 | echo -e "${GREEN}Time corrections extracted${NC}"
17 | 
18 | # Move output file to correct location
19 | mv "$TIME_CORRECTIONS_PATH.tmp" "$TIME_CORRECTIONS_PATH"
20 | 
21 | echo -e "${YELLOW}Bag file $BAG_NAME time corrections saved to $TIME_CORRECTIONS_PATH${NC}"
22 | 
--------------------------------------------------------------------------------
/build_system/bag_processing/rosbag2verify.bash:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | source ./build_system/preamble.bash
 3 | 
 4 | # Set the output files to check
 5 | if [[ "$BAG_PATH" == *"camera_calib"* ]]; then
 6 |     # For camera calibration bags, check for CAMCHAIN_PATH, REPORT_CAM_PATH, RESULTS_CAM_PATH
 7 |     output_files=( "$CAMCHAIN_PATH" "$REPORT_CAM_PATH" "$RESULTS_CAM_PATH" )
 8 |     check_files output_files
 9 | elif [[ "$BAG_PATH" == *"imu_calib"* ]]; then
10 |     output_files=( "$IMU_CHAIN_PATH" "$IMU_RESULTS_PATH")
11 |     check_files output_files
12 | elif [[ "$BAG_PATH" == *"lidar_calib"* ]]; then
13 |     # TODO: skip files for lidar_calib. Empty array
14 |     echo -e "${YELLOW}TODO: Implement skip for lidar_calib${NC}"
15 | else
16 |     # Data bags
17 |     output_files=("$H5_PATH")
18 |     check_files output_files
19 | fi
20 | 
21 | echo -e "${BLUE}Verifying bag file $BAG_PATH${NC}"
22 | python3 build_system/bag_processing/rosbag2verify.py --bag "$BAG_PATH"
23 | exit $?
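The time-corrections YAML produced by `rosbag2timecorrection.py` above is what the downstream tools consume (e.g., `bag_to_all_frames.cpp` in the calibration package). A rough sketch of its shape, inferred from the fields those readers access; the numeric values here are placeholders, and per the readers the offsets and skews are stored in microseconds:
```
/ovc/pps_cb:
  decimator:
    round_offset: 0   # VectorNav phase offset, in 2.5 ms ticks
  correction:
    offset: 0         # us
    skew: 0.0         # us of drift per second of elapsed time
/prophesee/left/events:
  correction:
    offset: 0
    skew: 0.0
/prophesee/right/events:
  correction:
    offset: 0
    skew: 0.0
```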
24 | -------------------------------------------------------------------------------- /build_system/bag_processing/rosbag2verify.py: -------------------------------------------------------------------------------- 1 | import rosbag 2 | import sys 3 | from event_array_py import Decoder 4 | from collections import defaultdict 5 | from tqdm import tqdm 6 | import numpy as np 7 | import os 8 | import pdb 9 | from colorama import Fore, Style 10 | from scipy.spatial.distance import cdist 11 | 12 | 13 | def topic_count_verify(bag_name, bag, topic, expected_freq, tolerance=0.02): 14 | print(Fore.GREEN, bag_name, topic, "[starting]", Style.RESET_ALL) 15 | info = bag.get_type_and_topic_info(topic)[1][topic] 16 | duration = bag.get_end_time() - bag.get_start_time() 17 | count = info.message_count 18 | expected_count = duration * expected_freq 19 | 20 | if abs((count - expected_count)/expected_count) > tolerance: 21 | print(Fore.RED, bag_name, topic, "[failed]", Style.RESET_ALL) 22 | print(abs((count - expected_count)/expected_count)) 23 | 24 | return False, topic, info 25 | 26 | print(Fore.GREEN, bag_name, topic, "[completed]", Style.RESET_ALL) 27 | return True, topic, info 28 | 29 | 30 | def ovc_timing_verify(bag_name, bag, tolerance=0.1, verbose=False): 31 | print(Fore.GREEN, bag_name, "[OVC TIMING CHECK]", "[starting]", Style.RESET_ALL) 32 | ovc_topics = ["/ovc/pps_cb", "/ovc/vectornav/imu", "/ovc/left/image_mono/compressed"] 33 | ros_timing_info = defaultdict(list) 34 | ros_seq_info = defaultdict(list) 35 | for topic, msg, t in tqdm(bag.read_messages(topics=ovc_topics), 36 | disable=not verbose): 37 | if topic == "/ovc/pps_cb": 38 | ros_timing_info[topic].append(msg.stamp.to_nsec()) 39 | ros_seq_info[topic].append(msg.seq) 40 | else: 41 | ros_timing_info[topic].append(msg.header.stamp.to_nsec()) 42 | ros_seq_info[topic].append(msg.header.seq) 43 | 44 | ros_timing_info = {k:np.array(v) for k,v in ros_timing_info.items()} 45 | ros_seq_info = {k:np.array(v) for k,v in ros_seq_info.items()} 46 | 47 | # Fix this!! 
Idea -> searchsorted 48 | matching_inds = np.searchsorted( ros_timing_info['/ovc/left/image_mono/compressed'], ros_timing_info['/ovc/pps_cb'] ) - 1 49 | 50 | imager_ts = ros_timing_info['/ovc/left/image_mono/compressed'][matching_inds][2:-2] 51 | pps_cb_ts = ros_timing_info['/ovc/pps_cb'][2:-2] 52 | 53 | vnav_offset = (pps_cb_ts - imager_ts) / 2500000 54 | vnav_int_offset = vnav_offset.round() % 16 55 | 56 | inliers = vnav_int_offset == vnav_int_offset[0] 57 | 58 | passed = (inliers.sum() / inliers.shape[0]) > 0.9 59 | 60 | info = { 61 | "imager_ts": imager_ts.tolist(), 62 | "pps_cb_ts": pps_cb_ts.tolist(), 63 | "vnav_offset": vnav_offset.tolist(), 64 | "vnav_int_offset": vnav_int_offset.tolist(), 65 | } 66 | 67 | if not passed: 68 | print(Fore.RED, bag_name, "OVC_TIMING", "[failed]", Style.RESET_ALL) 69 | 70 | return passed, "OVC_TIMING", info 71 | 72 | 73 | def event_trigger_verify(bag_name, bag, topic, verbose=True): 74 | print(Fore.GREEN, bag_name, topic, "[starting]", Style.RESET_ALL) 75 | decoder = Decoder() 76 | 77 | stats = defaultdict(int) 78 | triggers = [] 79 | event_rate = [] 80 | 81 | for topic, msg, t in tqdm(bag.read_messages(topics=topic), 82 | disable=not verbose): 83 | decoder.decode_bytes(msg.encoding, msg.width, msg.height, 84 | msg.time_base, msg.events) 85 | cd_events = decoder.get_cd_events() 86 | stats["cd_event_count"] += len(cd_events) 87 | event_rate.append(stats['cd_event_count'] / (cd_events['t'][-1]-cd_events['t'][0])) 88 | 89 | trig_events = decoder.get_ext_trig_events() 90 | stats["trig_event_count"] += len(trig_events) 91 | for e in trig_events: 92 | triggers.append(e) 93 | 94 | triggers = np.array(triggers) 95 | 96 | pos_dt = np.diff(triggers[triggers['p'] == 0]['t']).astype(float) / 1e6 97 | neg_dt = np.diff(triggers[triggers['p'] == 1]['t']).astype(float) / 1e6 98 | 99 | stats['event_rate_mean'] = np.mean(event_rate) 100 | stats['event_rate_std'] = np.std(event_rate) 101 | stats['event_rate_max'] = np.max(event_rate) 102 | stats['event_rate_min'] = np.min(event_rate) 103 | 104 | stats['pos_trigger_dt_mean'] = np.mean(pos_dt) 105 | stats['neg_trigger_dt_mean'] = np.mean(neg_dt) 106 | stats['pos_trigger_dt_std'] = np.std(pos_dt) 107 | stats['neg_trigger_dt_std'] = np.std(neg_dt) 108 | stats['pos_trigger_dt_max'] = np.max(pos_dt) 109 | stats['neg_trigger_dt_max'] = np.max(neg_dt) 110 | 111 | if (np.any(stats['pos_trigger_dt_max'] > 1.5) or 112 | np.any(stats['neg_trigger_dt_max'] > 1.5)): 113 | print(Fore.RED, bag_name, topic, "[failed]", Style.RESET_ALL) 114 | return False, topic, stats 115 | 116 | print(Fore.GREEN, bag_name, topic, "[completed]", Style.RESET_ALL) 117 | 118 | return True, topic, stats 119 | 120 | 121 | def verifications(bag): 122 | # Check if bag exists in the filesystem 123 | if not os.path.exists(bag): 124 | print("Bag not found: {}".format(bag)) 125 | sys.exit(1) 126 | 127 | # Create a rosbag object 128 | bag = rosbag.Bag(bag) 129 | 130 | # Print only the bag without the path 131 | bag_name = bag.filename.split('/')[-1] 132 | 133 | verifications = [] 134 | try: 135 | # calib_lidar bags are generally shorter, and thus the tolerance 136 | # calculation can fail (as we only have a few messages) 137 | # Increasing the tolerance for those bags helps 138 | if "calib" in bag_name and "lidar" in bag_name: 139 | print(Fore.YELLOW, "Increasing tolerance due to lidar calib bag", 140 | Style.RESET_ALL) 141 | tolerance = 0.5 142 | else: 143 | tolerance = 0.02 144 | 145 | verifications.append(ovc_timing_verify(bag_name, bag)) 146 | 147 | # Do not run 
GPS tests on calibration, indoor, or tunnel bags 148 | if ("calib" not in bag_name and 149 | "indoor" not in bag_name and 150 | "schuylkill_tunnel" not in bag_name): 151 | verifications.append(topic_count_verify(bag_name, bag, 152 | "/ovc/gps_trigger", 153 | 1.0, tolerance)) 154 | verifications.append(topic_count_verify(bag_name, bag, 155 | "/ublox/fix", 156 | 5.0, tolerance)) 157 | else: 158 | print(Fore.YELLOW, "Gps checks skipped in indoor bags", 159 | "or in Schuylkill tunnel bags", Style.RESET_ALL) 160 | 161 | # Check all the other common topics 162 | verifications.append(topic_count_verify(bag_name, bag, 163 | "/ovc/pps_cb", 164 | 1.0, tolerance)) 165 | verifications.append(topic_count_verify(bag_name, bag, 166 | "/ovc/vectornav/imu", 167 | 400.0, tolerance)) 168 | verifications.append(topic_count_verify(bag_name, bag, 169 | "/ovc/vectornav/mag", 170 | 400.0, tolerance)) 171 | verifications.append(topic_count_verify(bag_name, bag, 172 | "/ovc/left/image_mono/compressed", 173 | 25.0, tolerance)) 174 | verifications.append(topic_count_verify(bag_name, bag, 175 | "/ovc/right/image_mono/compressed", 176 | 25.0, tolerance)) 177 | verifications.append(topic_count_verify(bag_name, bag, 178 | "/ovc/rgb/image_color/compressed", 179 | 25.0, tolerance)) 180 | 181 | verifications.append(topic_count_verify(bag_name, bag, 182 | "/os_node/imu_packets", 183 | 100.0, tolerance)) 184 | verifications.append(topic_count_verify(bag_name, bag, 185 | "/os_node/sys_time", 186 | 100.0, tolerance)) 187 | verifications.append(topic_count_verify(bag_name, bag, 188 | "/os_node/lidar_packets", 189 | 1280.0, tolerance)) 190 | 191 | # Do not check events for lidar_calib bags 192 | if not ("calib" in bag_name and "lidar" in bag_name): 193 | verifications.append(event_trigger_verify(bag_name, bag, 194 | "/prophesee/right/events", 195 | args.verbose)) 196 | verifications.append(event_trigger_verify(bag_name, bag, 197 | "/prophesee/left/events", 198 | args.verbose)) 199 | else: 200 | print(Fore.YELLOW, "Event checks skipped in lidar_calib bags", 201 | Style.RESET_ALL) 202 | 203 | except Exception as e: 204 | print(Fore.RED, bag_name, "Exception: ", e, Style.RESET_ALL) 205 | exit_code = 1 206 | else: 207 | exit_code = 0 208 | 209 | for v in verifications: 210 | if not v[0] or args.verbose: 211 | print("========================") 212 | print(v[0]) 213 | print(v[1]) 214 | print(v[2]) 215 | print("========================") 216 | if not v[0]: 217 | exit_code = 1 218 | sys.exit(exit_code) 219 | 220 | 221 | if __name__ == "__main__": 222 | import argparse 223 | parser = argparse.ArgumentParser() 224 | parser.add_argument('--bag', 225 | help='List of bags to process, separated by a space') 226 | # verbose mode 227 | parser.add_argument('--verbose', action='store_true', 228 | help='Set Verbose Mode') 229 | args = parser.parse_args() 230 | verifications(args.bag) 231 | -------------------------------------------------------------------------------- /build_system/calibration/aprilgrid.yaml: -------------------------------------------------------------------------------- 1 | target_type: 'aprilgrid' #gridtype 2 | tagCols: 7 #number of apriltags 3 | tagRows: 5 #number of apriltags 4 | tagSize: 0.04 #size of apriltag, edge to edge [m] 5 | tagSpacing: 0.25 #ratio of space between tags to tagSize 6 | #example: tagSize=2m, spacing=0.5m --> tagSpacing=0.25[-] 7 | -------------------------------------------------------------------------------- /build_system/calibration/event_bag_convert/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.2) 2 | project(event_bag_convert) 3 | add_compile_options(-Wall -Wextra -Wpedantic -Werror) 4 | add_definitions(-DUSING_ROS_1) 5 | set(CMAKE_CXX_STANDARD 17) 6 | 7 | ## Compile as C++11, supported in ROS Kinetic and newer 8 | # add_compile_options(-std=c++11) 9 | find_package(simple_image_recon_lib) 10 | 11 | ## Find catkin macros and libraries 12 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 13 | ## is used, also find other catkin packages 14 | find_package(catkin REQUIRED COMPONENTS 15 | event_array_msgs 16 | event_array_codecs 17 | geometry_msgs 18 | rosbag 19 | roscpp 20 | sensor_msgs 21 | simple_image_recon 22 | std_msgs 23 | ) 24 | 25 | catkin_package( 26 | # INCLUDE_DIRS include 27 | # LIBRARIES event_bag_convert 28 | # CATKIN_DEPENDS event_array_msgs geometry_msgs rosbag roscpp sensor_msgs simple_image_recon std_msgs 29 | # DEPENDS system_lib 30 | ) 31 | 32 | ########### 33 | ## Build ## 34 | ########### 35 | 36 | ## Specify additional locations of header files 37 | ## Your package locations should be listed before other locations 38 | include_directories( 39 | include 40 | ${catkin_INCLUDE_DIRS} 41 | ${simple_image_recon_lib_INCLUDE_DIRS} 42 | ) 43 | 44 | add_executable(event_bag_converter src/bag_to_all_frames.cpp) 45 | 46 | target_link_libraries(event_bag_converter 47 | ${catkin_LIBRARIES} 48 | simple_image_recon_lib::simple_image_recon_lib 49 | yaml-cpp 50 | ) 51 | 52 | ## Declare a C++ library 53 | # add_library(${PROJECT_NAME} 54 | # src/${PROJECT_NAME}/event_bag_convert.cpp 55 | # ) 56 | 57 | ## Add cmake target dependencies of the library 58 | ## as an example, code may need to be generated before libraries 59 | ## either from message generation or dynamic reconfigure 60 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 61 | 62 | ## Declare a C++ executable 63 | ## With catkin_make all packages are built within a single CMake context 64 | ## The recommended prefix ensures that target names across packages don't collide 65 | # add_executable(${PROJECT_NAME}_node src/event_bag_convert_node.cpp) 66 | 67 | ## Rename C++ executable without prefix 68 | ## The above recommended prefix causes long target names, the following renames the 69 | ## target back to the shorter version for ease of user use 70 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 71 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 72 | 73 | ## Add cmake target dependencies of the executable 74 | ## same as for the library above 75 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 76 | 77 | ## Specify libraries to link a library or executable target against 78 | # target_link_libraries(${PROJECT_NAME}_node 79 | # ${catkin_LIBRARIES} 80 | # ) 81 | 82 | ############# 83 | ## Install ## 84 | ############# 85 | 86 | # all install targets should use catkin DESTINATION variables 87 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 88 | 89 | ## Mark executable scripts (Python etc.) 
for installation
 90 | ## in contrast to setup.py, you can choose the destination
 91 | # catkin_install_python(PROGRAMS
 92 | #   scripts/my_python_script
 93 | #   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
 94 | # )
 95 | 
 96 | ## Mark executables for installation
 97 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
 98 | # install(TARGETS ${PROJECT_NAME}_node
 99 | #   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
100 | # )
101 | 
102 | ## Mark libraries for installation
103 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
104 | # install(TARGETS ${PROJECT_NAME}
105 | #   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
106 | #   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
107 | #   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
108 | # )
109 | 
110 | ## Mark cpp header files for installation
111 | # install(DIRECTORY include/${PROJECT_NAME}/
112 | #   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
113 | #   FILES_MATCHING PATTERN "*.h"
114 | #   PATTERN ".svn" EXCLUDE
115 | # )
116 | 
117 | ## Mark other files for installation (e.g. launch and bag files, etc.)
118 | # install(FILES
119 | #   # myfile1
120 | #   # myfile2
121 | #   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
122 | # )
123 | 
124 | #############
125 | ## Testing ##
126 | #############
127 | 
128 | ## Add gtest based cpp test target and link libraries
129 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_event_bag_convert.cpp)
130 | # if(TARGET ${PROJECT_NAME}-test)
131 | #   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
132 | # endif()
133 | 
134 | ## Add folders to be run by python nosetests
135 | # catkin_add_nosetests(test)
136 | 
--------------------------------------------------------------------------------
/build_system/calibration/event_bag_convert/package.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | <package format="2">
 3 |   <name>event_bag_convert</name>
 4 |   <version>0.0.0</version>
 5 |   <description>The event_bag_convert package</description>
 6 | 
 7 |   <maintainer email="ken@todo.todo">ken</maintainer>
 8 |   <license>TODO</license>
 9 | 
10 |   <buildtool_depend>catkin</buildtool_depend>
11 |   <build_depend>event_array_msgs</build_depend>
12 |   <build_depend>geometry_msgs</build_depend>
13 |   <build_depend>rosbag</build_depend>
14 |   <build_depend>roscpp</build_depend>
15 |   <build_depend>sensor_msgs</build_depend>
16 |   <build_depend>simple_image_recon</build_depend>
17 |   <build_depend>std_msgs</build_depend>
18 |   <build_export_depend>event_array_msgs</build_export_depend>
19 |   <build_export_depend>geometry_msgs</build_export_depend>
20 |   <build_export_depend>rosbag</build_export_depend>
21 |   <build_export_depend>roscpp</build_export_depend>
22 |   <build_export_depend>sensor_msgs</build_export_depend>
23 |   <build_export_depend>simple_image_recon</build_export_depend>
24 |   <build_export_depend>std_msgs</build_export_depend>
25 |   <exec_depend>event_array_msgs</exec_depend>
26 |   <exec_depend>geometry_msgs</exec_depend>
27 |   <exec_depend>rosbag</exec_depend>
28 |   <exec_depend>roscpp</exec_depend>
29 |   <exec_depend>sensor_msgs</exec_depend>
30 |   <exec_depend>simple_image_recon</exec_depend>
31 |   <exec_depend>std_msgs</exec_depend>
32 | 
33 |   <export>
34 |   </export>
35 | </package>
--------------------------------------------------------------------------------
/build_system/calibration/event_bag_convert/src/bag_to_all_frames.cpp:
--------------------------------------------------------------------------------
 1 | // -*-c++-*---------------------------------------------------------------------------------------
 2 | // Copyright 2023 Bernd Pfrommer
 3 | //
 4 | // Edited by Kenneth Chaney for dataset support
 5 | //
 6 | // Licensed under the Apache License, Version 2.0 (the "License");
 7 | // you may not use this file except in compliance with the License.
8 | // You may obtain a copy of the License at 9 | // 10 | // http://www.apache.org/licenses/LICENSE-2.0 11 | // 12 | // Unless required by applicable law or agreed to in writing, software 13 | // distributed under the License is distributed on an "AS IS" BASIS, 14 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | // See the License for the specific language governing permissions and 16 | // limitations under the License. 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #include 27 | 28 | #include 29 | #include 30 | 31 | #include 32 | #include 33 | 34 | using event_array_msgs::EventArray; 35 | using sensor_msgs::Image; 36 | using sensor_msgs::CompressedImage; 37 | using ApproxRecon = simple_image_recon::ApproxReconstructor< 38 | EventArray, EventArray::ConstPtr, Image, Image::ConstPtr>; 39 | 40 | int64_t retime_ovc(int64_t time_nsec, int64_t offset, double skew){ 41 | int64_t new_time = time_nsec-offset; 42 | 43 | int64_t skew_change = (new_time / 1e9) * skew; 44 | new_time += skew_change; 45 | 46 | // new_time /= 2500000; 47 | // new_time *= 2500000; 48 | 49 | return new_time; 50 | } 51 | 52 | void usage() 53 | { 54 | std::cout << "usage:" << std::endl; 55 | std::cout << "bag_to_frames -i input_bag -o output_bag -t time_sync_file" 56 | "[-c cutoff_period] [-d image_decimator]" 57 | << std::endl; 58 | } 59 | 60 | class OutBagWriter : public simple_image_recon::FrameHandler 61 | { 62 | public: 63 | explicit OutBagWriter(const std::string & bagName) 64 | { 65 | outBag_.open(bagName, rosbag::bagmode::Write); 66 | } 67 | void frame(const Image::ConstPtr & img, const std::string & topic) override 68 | { 69 | outBag_.write(topic, img->header.stamp, img); 70 | numFrames_++; 71 | if (numFrames_ % 100 == 0) { 72 | std::cout << "wrote " << numFrames_ << " frames " << std::endl; 73 | } 74 | } 75 | 76 | rosbag::Bag outBag_; 77 | size_t numFrames_{0}; 78 | }; 79 | 80 | using simple_image_recon::ApproxReconstructor; 81 | 82 | int main(int argc, char ** argv) 83 | { 84 | int opt; 85 | std::string inBagName; 86 | std::string outBagName; 87 | std::string timeSyncFileName; 88 | 89 | std::vector eventTopics{"/prophesee/left/events","/prophesee/right/events"}; 90 | std::vector ovcTopics{"/ovc/left/image_mono/compressed","/ovc/right/image_mono/compressed","/ovc/rgb/image_color/compressed"}; 91 | 92 | int cutoff_period(45); 93 | 94 | int image_decimator(1); 95 | 96 | while ((opt = getopt(argc, argv, "i:o:O:t:c:d:h")) != -1) { 97 | switch (opt) { 98 | case 'i': 99 | inBagName = optarg; 100 | break; 101 | case 'o': 102 | outBagName = optarg; 103 | break; 104 | case 't': 105 | timeSyncFileName = optarg; 106 | break; 107 | case 'c': 108 | cutoff_period = atoi(optarg); 109 | break; 110 | case 'd': 111 | image_decimator = atoi(optarg); 112 | break; 113 | case 'h': 114 | usage(); 115 | return (-1); 116 | break; 117 | default: 118 | std::cout << "unknown option: " << opt << std::endl; 119 | usage(); 120 | return (-1); 121 | break; 122 | } 123 | } 124 | 125 | if (inBagName.empty()) { 126 | std::cout << "missing input bag file name!" << std::endl; 127 | usage(); 128 | return (-1); 129 | } 130 | 131 | if (outBagName.empty()) { 132 | std::cout << "missing output bag file name!" << std::endl; 133 | usage(); 134 | return (-1); 135 | } 136 | 137 | if (timeSyncFileName.empty()) { 138 | std::cout << "missing time sync file name!" 
<< std::endl; 139 | usage(); 140 | return (-1); 141 | } 142 | 143 | std::map offsets; 144 | std::map skews; 145 | 146 | std::cout << "Opening " << timeSyncFileName << std::endl; 147 | 148 | // This file is in micro-seconds 149 | YAML::Node time_sync_config = YAML::LoadFile(timeSyncFileName); 150 | 151 | auto ovc_topic_node = time_sync_config["/ovc/pps_cb"]; 152 | int vnav_offset = 2500*1000*ovc_topic_node["decimator"]["round_offset"].as(); 153 | 154 | offsets["ovc"] = 1000*ovc_topic_node["correction"]["offset"].as() + vnav_offset; 155 | skews["ovc"] = 1000*ovc_topic_node["correction"]["skew"].as(); 156 | 157 | auto left_topic_node = time_sync_config[eventTopics[0]]; 158 | offsets[eventTopics[0]] = 1000*left_topic_node["correction"]["offset"].as() + vnav_offset; 159 | skews[eventTopics[0]] = 1000*left_topic_node["correction"]["skew"].as(); 160 | 161 | auto right_topic_node = time_sync_config[eventTopics[1]]; 162 | offsets[eventTopics[1]] = 1000*right_topic_node["correction"]["offset"].as() + vnav_offset; 163 | skews[eventTopics[1]] = 1000*right_topic_node["correction"]["skew"].as(); 164 | 165 | std::cout << "Retrieved time synchronizations" << std::endl; 166 | std::cout << "- vnav phase offset " << vnav_offset << std::endl; 167 | std::cout << "- Offsets : " << offsets << std::endl; 168 | std::cout << "- Skews : " << skews << std::endl; 169 | std::cout << "" << std::endl; 170 | 171 | std::cout << "Settings" << std::endl; 172 | std::cout << "- Image Decimator " << image_decimator << std::endl; 173 | std::cout << "- FilterCutoff " << cutoff_period << std::endl; 174 | 175 | OutBagWriter writer(outBagName); 176 | 177 | // Grab Frame times from original bag and copy images from OVC over 178 | std::vector ovc_times; 179 | 180 | std::cout << "Opening " << inBagName << std::endl; 181 | rosbag::Bag inBag; 182 | inBag.open(inBagName, rosbag::bagmode::Read); 183 | std::cout << "- Opened" << std::endl; 184 | 185 | ros::Time first_image_time; 186 | bool set_first_image = false; 187 | 188 | std::cout << "Get frame times to write to bag" << std::endl; 189 | rosbag::View view_primary(inBag, rosbag::TopicQuery(std::vector{ovcTopics[2]})); 190 | int image_decimator_counter(0); 191 | for (const rosbag::MessageInstance & msg : view_primary) { 192 | auto m = msg.instantiate(); 193 | if(!set_first_image){ 194 | first_image_time = m->header.stamp; 195 | if( static_cast(first_image_time.toNSec()) - static_cast(offsets["ovc"]) < 0 ) { 196 | continue; 197 | }else{ 198 | set_first_image = true; 199 | } 200 | } 201 | if(set_first_image && image_decimator_counter++ % image_decimator == 0){ 202 | ovc_times.emplace_back( offsets["ovc"] + retime_ovc(m->header.stamp.toNSec(), offsets["ovc"], skews["ovc"]) ); 203 | } 204 | } 205 | 206 | std::cout << "- Requesting " << ovc_times.size() << " frames" << std::endl; 207 | std::cout << "- Start : " << ovc_times[0] << std::endl; 208 | std::cout << "- End : " << ovc_times[ovc_times.size()-1] << std::endl; 209 | std::cout << "- Start (rel) : " << ovc_times[0]-offsets["ovc"] << std::endl; 210 | std::cout << "- End (rel) : " << ovc_times[ovc_times.size()-1]-offsets["ovc"] << std::endl; 211 | 212 | std::cout << "Copying OVC frames" << std::endl; 213 | rosbag::View view_ovc(inBag, rosbag::TopicQuery(ovcTopics)); 214 | for (const rosbag::MessageInstance & msg : view_ovc) { 215 | auto m = msg.instantiate(); 216 | auto retimed = offsets["ovc"] + retime_ovc(m->header.stamp.toNSec(), offsets["ovc"], skews["ovc"]); 217 | 218 | auto find_res = std::find(ovc_times.begin(), 219 | ovc_times.end(), 
220 | retimed); 221 | if(find_res != ovc_times.end()){ 222 | m->header.stamp.fromNSec( retimed ); 223 | writer.outBag_.write(msg.getTopic(), m->header.stamp, m); 224 | } 225 | } 226 | 227 | std::cout << "Building Reconstructors" << std::endl; 228 | const double fillRatio = 0.6; 229 | const int tileSize = 2; 230 | std::unordered_map recons; 231 | for (size_t i = 0; i < eventTopics.size(); i++) { 232 | 233 | std::cout << " Final topic offset " << eventTopics[i] << " : " << offsets[eventTopics[i]] << std::endl; 234 | recons.insert( 235 | {eventTopics[i], ApproxRecon( 236 | &writer, eventTopics[i], cutoff_period, 0.1, fillRatio, 237 | tileSize, offsets["ovc"]-offsets[eventTopics[i]], ovc_times)}); 238 | } 239 | 240 | size_t numMessages(0); 241 | rosbag::View view_events(inBag, rosbag::TopicQuery(eventTopics)); 242 | for (const rosbag::MessageInstance & msg : view_events) { 243 | auto it = recons.find(msg.getTopic()); 244 | if (it != recons.end()) { 245 | auto m = msg.instantiate(); 246 | it->second.processMsg(m); 247 | numMessages++; 248 | } 249 | } 250 | std::cout << "processed " << numMessages << " number of messages" 251 | << std::endl; 252 | 253 | return (0); 254 | } 255 | -------------------------------------------------------------------------------- /build_system/calibration/kalibr_all: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # ./kalibr_events 5 | # 6 | # 7 | 8 | IMAGER_BAG=$1 9 | TARGET=$2 10 | CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" 11 | 12 | # Check if target is empty, and replace it with aprilgrid.yaml 13 | if [ -z "$TARGET" ]; then 14 | echo "No target specified, using aprilgrid.yaml" 15 | TARGET=aprilgrid.yaml 16 | fi 17 | 18 | if [ -f "$CAMCHAIN" ]; then 19 | echo "$CAMCHAIN exists - please remove to rerun" 20 | else 21 | export KALIBR_MANUAL_FOCAL_LENGTH_INIT=1035 22 | rosrun kalibr kalibr_calibrate_cameras \ 23 | --bag "$IMAGER_BAG" --target "$TARGET" \ 24 | --models pinhole-radtan pinhole-radtan pinhole-radtan pinhole-radtan pinhole-radtan \ 25 | --topics /prophesee/left/events /prophesee/right/events /ovc/left/image_mono/compressed /ovc/right/image_mono/compressed /ovc/rgb/image_color/compressed \ 26 | --dont-show-report 27 | fi 28 | -------------------------------------------------------------------------------- /build_system/calibration/kalibr_events: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # ./kalibr_events 5 | # 6 | # 7 | 8 | IMAGER_BAG=$1 9 | CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" 10 | 11 | 12 | if [ -f $CAMCHAIN ]; then 13 | echo "$CAMCHAIN exists - please remove to rerun" 14 | else 15 | rosrun kalibr kalibr_calibrate_cameras \ 16 | --bag $IMAGER_BAG --target aprilgrid.yaml \ 17 | --models pinhole-radtan pinhole-radtan \ 18 | --topics /prophesee/left/events /prophesee/right/events \ 19 | --dont-show-report 20 | fi 21 | -------------------------------------------------------------------------------- /build_system/calibration/kalibr_ovc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | # ./kalibr_stereo_mono.bash 5 | # 6 | # 7 | 8 | IMAGER_BAG=$1 9 | IMU_BAG=$2 10 | CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" 11 | IMUCALIB="${IMU_BAG%.bag}-imu.yaml" 12 | IMUCAMCHAIN="${IMU_BAG%.bag}-camchain-imucam.yaml" 13 | 14 | if [ -f $IMUCAMCHAIN ]; then 15 | echo "$IMUCAMCHAIN exists - please remove to rerun" 16 | else 17 | rosrun kalibr kalibr_calibrate_imu_camera \ 
18 | --bag $IMU_BAG --target aprilgrid.yaml \ 19 | --cam $CAMCHAIN --imu vn100.yaml \ 20 | --dont-show-report 21 | fi 22 | -------------------------------------------------------------------------------- /build_system/calibration/only_ovc_calib.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import numpy as np 3 | import yaml 4 | import pdb 5 | 6 | 7 | 8 | if __name__ == "__main__": 9 | import argparse 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--calib_input', type=str, required=True) 12 | parser.add_argument('--calib_output', type=str, required=True) 13 | args = parser.parse_args() 14 | 15 | # Get input yaml file onto var 16 | with open(args.calib_input, 'r') as stream: 17 | input_yaml = yaml.safe_load(stream) 18 | 19 | output_dict = {} 20 | 21 | for cam in input_yaml: 22 | # Skip prophesee cameras 23 | if cam in ["cam0", "cam1"]: 24 | continue 25 | cam_number = int(cam.split("cam")[1]) 26 | new_cam_number = cam_number - 2 27 | new_cam = f"cam{new_cam_number}" 28 | output_dict[new_cam] = input_yaml[cam] 29 | if new_cam == "cam0": 30 | del output_dict[new_cam]["T_cn_cnm1"] 31 | overlaps = list(range(3)) 32 | overlaps.remove(new_cam_number) 33 | output_dict[new_cam]["cam_overlaps"] = overlaps 34 | 35 | # Save output yaml file 36 | with open(args.calib_output, 'w') as outfile: 37 | outfile.write(yaml.dump(output_dict, default_flow_style=False)) 38 | -------------------------------------------------------------------------------- /build_system/calibration/rosbag2camcalibration.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./build_system/preamble.bash 3 | 4 | output_files=("$CAMCHAIN_PATH" "$REPORT_CAM_PATH" "$RESULTS_CAM_PATH") 5 | check_files output_files 6 | 7 | # Link the event_bag_convert directory to the current ROS src dir and build 8 | if [ ! -L ~/catkin_ws/src/event_bag_convert ]; then 9 | ln -s "$(pwd)"/build_system/calibration/event_bag_convert ~/catkin_ws/src/event_bag_convert 10 | fi 11 | pushd ~/catkin_ws/src 12 | catkin build --no-status 13 | popd 14 | . 
~/catkin_ws/devel/setup.bash
15 | 
16 | # Run the conversion script
17 | TMP_BAG="$TMP_PATH/calibration.bag"
18 | rosrun event_bag_convert event_bag_converter -i "$BAG_PATH" -o "$TMP_BAG" -t "$TIME_CORRECTIONS_PATH" -c 30 -d 5
19 | 
20 | # Run the calibration script
21 | ./build_system/calibration/kalibr_all "$TMP_BAG" "./build_system/calibration/aprilgrid.yaml"
22 | 
23 | # Move the results to the output folder
24 | CAMCHAIN_TMP="${TMP_BAG%.bag}-camchain.yaml"
25 | REPORTS_TMP="${TMP_BAG%.bag}-report-cam.pdf"
26 | RESULTS_TMP="${TMP_BAG%.bag}-results-cam.txt"
27 | mv "$CAMCHAIN_TMP" "$CAMCHAIN_PATH"
28 | mv "$REPORTS_TMP" "$REPORT_CAM_PATH"
29 | mv "$RESULTS_TMP" "$RESULTS_CAM_PATH"
30 | 
--------------------------------------------------------------------------------
/build_system/calibration/rosbag2imucalibration.bash:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | source ./build_system/preamble.bash
 3 | 
 4 | output_files=("$IMU_CHAIN_PATH" "$IMU_RESULTS_PATH" "$IMU_REPORT_PATH")
 5 | check_files output_files
 6 | 
 7 | # Run the conversion script
 8 | TMP_CHAIN="$TMP_PATH/strip_camchain.yaml"
 9 | TMP_BAG="$TMP_PATH/ovc_only.bag"
10 | 
11 | rosbag filter "$BAG_PATH" "$TMP_BAG" "topic.startswith('/ovc')"
12 | 
13 | python3 build_system/calibration/only_ovc_calib.py \
14 |     --calib_input "$CALIB_CAMCHAIN_PATH" \
15 |     --calib_output "$TMP_CHAIN"
16 | 
17 | sleep 60
18 | 
19 | rosrun kalibr kalibr_calibrate_imu_camera \
20 |     --bag "$TMP_BAG" \
21 |     --cam "$TMP_CHAIN" \
22 |     --imu build_system/calibration/vn100.yaml \
23 |     --target build_system/calibration/aprilgrid.yaml \
24 |     --dont-show-report
25 | 
26 | # Move the results to the output folder
27 | CAMCHAIN_TMP="${TMP_BAG%.bag}-camchain-imucam.yaml"
28 | RESULTS_TMP="${TMP_BAG%.bag}-results-imucam.txt"
29 | REPORT_TMP="${TMP_BAG%.bag}-report-imucam.pdf"
30 | mv "$CAMCHAIN_TMP" "$IMU_CHAIN_PATH"
31 | mv "$RESULTS_TMP" "$IMU_RESULTS_PATH"
32 | mv "$REPORT_TMP" "$IMU_REPORT_PATH"
33 | 
--------------------------------------------------------------------------------
/build_system/calibration/vn100.yaml:
--------------------------------------------------------------------------------
 1 | #Accelerometers
 2 | # Accelerometer noise density. [m/s^2*1/sqrt(Hz)]
 3 | # Datasheet: 0.14 mg/√Hz = 1.372e-3 m/s^2 * 1/sqrt(Hz)
 4 | accelerometer_noise_density: 1.372e-3
 5 | # Accelerometer bias random walk. [m/s^3*1/sqrt(Hz)]
 6 | # VNav technical support: Velocity random walk (VRW): 1.4e-3 m/s^2/sqrt(Hz)
 7 | #                         Bias Random Walk: accel : 5e-5 [m/s^3/sqrt(Hz)]
 8 | accelerometer_random_walk: 5.0e-5
 9 | 
10 | #Gyroscopes
11 | ## Gyro noise density. [rad/s*1/sqrt(Hz)]
12 | # Datasheet: 0.0035 °/s /√Hz = 6.10866e-05
13 | gyroscope_noise_density: 6.10866e-05 #Noise density (continuous-time)
14 | # Gyro bias random walk.
[rad/s^2*1/sqrt(Hz)] 15 | # VNav technical support: Angle random walk (ARW): 6.1e-5 rad/s/sqrt(Hz) 16 | # Bias Random Walk: gyro : 4e-6 [rad/s^2/sqrt(Hz)] 17 | gyroscope_random_walk: 4.0e-6 18 | 19 | rostopic: /ovc/vectornav/imu #the IMU ROS topic 20 | update_rate: 400.0 #Hz (for discretization of the values above) 21 | -------------------------------------------------------------------------------- /build_system/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Based on https://raw.githubusercontent.com/iandouglas96/jackal_master/master/jackal/Dockerfile 2 | FROM nvidia/cuda:11.3.1-devel-ubuntu20.04 3 | 4 | SHELL ["/bin/bash", "-c"] 5 | 6 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub 7 | 8 | #Run the frontend first so it doesn't throw an error later 9 | RUN export DEBIAN_FRONTEND=noninteractive \ 10 | && apt-get update \ 11 | && export TZ="America/New_York" \ 12 | && DEBIAN_FRONTEND=noninteractive apt-get install -y keyboard-configuration \ 13 | && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata \ 14 | && ln -fs "/usr/share/zoneinfo/$TZ" /etc/localtime \ 15 | && dpkg-reconfigure --frontend noninteractive tzdata \ 16 | && apt-get clean 17 | 18 | # Install general development tools 19 | RUN apt-get update \ 20 | && DEBIAN_FRONTEND=noninteractive apt-get install -y \ 21 | build-essential \ 22 | cmake \ 23 | cppcheck \ 24 | clang-tidy \ 25 | gdb \ 26 | git \ 27 | libcwiid-dev \ 28 | libgoogle-glog-dev \ 29 | libspnav-dev \ 30 | libusb-dev \ 31 | lsb-release \ 32 | python3-dbg \ 33 | python3-dev \ 34 | python3-empy \ 35 | python3-numpy \ 36 | python3-pip \ 37 | python3-venv \ 38 | python3-h5py \ 39 | python3-matplotlib \ 40 | python3-wxgtk4.0 \ 41 | python3-tk \ 42 | python3-igraph \ 43 | python3-pyx \ 44 | ipython3 \ 45 | software-properties-common \ 46 | sudo \ 47 | wget \ 48 | locales \ 49 | iputils-ping \ 50 | netcat \ 51 | hdf5-tools \ 52 | iproute2 \ 53 | dbus-x11 \ 54 | dialog \ 55 | fontconfig \ 56 | apt-utils \ 57 | ripgrep \ 58 | curl \ 59 | bc \ 60 | psmisc \ 61 | pybind11-dev \ 62 | && apt-get clean 63 | 64 | # Set locales 65 | RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ 66 | locale-gen 67 | ENV LC_ALL en_US.UTF-8 68 | ENV LANG en_US.UTF-8 69 | ENV LANGUAGE en_US:en 70 | 71 | 72 | # Install ROS noetic 73 | RUN sudo apt-get update \ 74 | && sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' \ 75 | && curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - \ 76 | && sudo apt-get update \ 77 | && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 78 | python3-catkin-tools \ 79 | python3-rosdep \ 80 | python3-rosinstall \ 81 | python3-vcstool \ 82 | ros-noetic-desktop-full \ 83 | ros-noetic-pointcloud-to-laserscan \ 84 | ros-noetic-robot-localization \ 85 | ros-noetic-spacenav-node \ 86 | ros-noetic-tf2-sensor-msgs \ 87 | ros-noetic-twist-mux \ 88 | ros-noetic-velodyne-simulator \ 89 | ros-noetic-vision-msgs \ 90 | ros-noetic-mavros \ 91 | ros-noetic-mavros-dbgsym \ 92 | ros-noetic-rtcm-msgs \ 93 | libjsoncpp-dev \ 94 | libspdlog-dev \ 95 | python3-yaml \ 96 | python3-pycryptodome \ 97 | python3-gnupg \ 98 | libsuitesparse-dev \ 99 | libv4l-dev \ 100 | libceres-dev \ 101 | python3-colorama \ 102 | ros-noetic-random-numbers \ 103 | ros-noetic-pybind11-catkin \ 104 | 
ros-noetic-pcl-ros \
105 | ros-noetic-rviz \
106 | ros-noetic-tf2-geometry-msgs \
107 | pcl-tools
108 | 
109 | # Install python tools with pip
110 | RUN python3 -m pip install rospkg defusedxml osrf-pycommon tqdm
111 | 
112 | # Rosdep initialization
113 | RUN sudo rosdep init \
114 |     && sudo apt-get clean \
115 |     && rosdep update
116 | 
117 | # Install the metavision SDK
118 | RUN sudo apt-get update \
119 |     && sudo sh -c 'echo "deb [arch=amd64 trusted=yes] https://apt.prophesee.ai/dists/public/baiTh5si/ubuntu focal sdk" > /etc/apt/sources.list.d/metavision.list' \
120 |     && sudo sh -c 'echo "deb [trusted=yes] http://ppa.launchpad.net/s-schmeisser/ogre-1.12/ubuntu focal main" >> /etc/apt/sources.list.d/metavision.list' \
121 |     && sudo apt-get update \
122 |     && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
123 |     libcanberra-gtk-module \
124 |     mesa-utils \
125 |     python3.8-dev \
126 |     libboost-program-options-dev \
127 |     libeigen3-dev \
128 |     libpcl-dev \
129 |     libyaml-cpp-dev \
130 |     metavision-sdk \
131 |     metavision-sdk-samples \
132 |     metavision-sdk-advanced-samples \
133 |     metavision-sdk-cv-samples \
134 |     libogre-1.12-dev \
135 |     ffmpeg \
136 |     libx264-dev \
137 |     libpcap-dev \
138 |     libtins-dev \
139 |     libflatbuffers-dev
140 | 
141 | # Dependencies required by metavision
142 | RUN python3 -m pip install "opencv-python==4.5.5.64" "sk-video==1.1.10" "fire==0.4.0" "numpy==1.23.4" pandas scipy
143 | 
144 | # Install open3d requirement for LiDAR calibration
145 | RUN python3 -m pip install 'open3d==0.13.0'
146 | 
147 | # Install the SilkyEV Driver
148 | # RUN wget https://centuryarks.com/en/wp-content/uploads/2021/11/SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4.zip \
149 | #     && unzip SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4.zip \
150 | #     && sed -i '/reload/d' SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4/CA_Silky_installer.sh \
151 | #     && sed -i '/^source/d' SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4/CA_Silky_installer.sh \
152 | #     && cd SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4 \
153 | #     && sh ./CA_Silky_installer.sh
154 | 
155 | 
156 | # Add a user with the same user_id as the user outside the container
157 | ARG user_id
158 | ARG user_name
159 | 
160 | ENV USER $user_name
161 | 
162 | RUN useradd -U --uid ${user_id} -m -s /bin/bash $USER \
163 |     && echo "$USER:$USER" | chpasswd \
164 |     && adduser $USER sudo \
165 |     && echo "$USER ALL=NOPASSWD: ALL" >> /etc/sudoers.d/$USER
166 | 
167 | # Commands below are run as USER
168 | USER $USER
169 | 
170 | # Create a ROS environment
171 | RUN mkdir -p /home/$USER/catkin_ws/src \
172 |     && cd /home/$USER/catkin_ws \
173 |     && catkin init \
174 |     && catkin config --extend /opt/ros/noetic \
175 |     && catkin config --merge-devel \
176 |     && catkin build -j 48 --no-status -DCMAKE_BUILD_TYPE=Release
177 | 
178 | # Load ROS in environment
179 | RUN /bin/sh -c 'echo ". /home/$USER/catkin_ws/devel/setup.bash" >> ~/.bashrc'
180 | 
181 | # Install Kalibr
182 | RUN cd /home/$USER/catkin_ws/src \
183 |     && git clone https://github.com/k-chaney/kalibr.git \
184 |     && cd kalibr \
185 |     && git checkout 21ba7fa35d52bf6702f7e53807f1138f5162e5e2 \
186 |     && cd ../.. \
187 |     && catkin build --no-status
188 | 
189 | # Install metavision_ros_driver and metavision tools
190 | RUN cd /home/$USER/catkin_ws/src \
191 |     && git clone https://github.com/berndpfrommer/metavision_ros_driver.git \
192 |     && cd metavision_ros_driver \
193 |     && git checkout ab2932035200eccefcc63d943643a9020e836c61 \
194 |     && cd .. \
195 |     && git clone https://github.com/berndpfrommer/event_array_py.git \
196 |     && cd event_array_py \
197 |     && git checkout 3c8578f66610493a11c6208376a4a22870162c37 \
198 |     && cd .. \
199 |     && git clone https://github.com/k-chaney/simple_image_recon.git \
200 |     && cd simple_image_recon \
201 |     && git checkout 3ba9d1aa964e6d493d9ec8d52e8c71ea3e8f4976 \
202 |     && cd .. \
203 |     && git clone https://github.com/berndpfrommer/simple_image_recon_lib.git \
204 |     && cd simple_image_recon_lib \
205 |     && git checkout 328d723f2941131b317471d97a534aaba5438003 \
206 |     && cd .. \
207 |     && git clone https://github.com/berndpfrommer/event_array_msgs.git \
208 |     && cd event_array_msgs \
209 |     && git checkout 1.0.1 \
210 |     && cd .. \
211 |     && git clone https://github.com/berndpfrommer/event_array_codecs.git \
212 |     && cd event_array_codecs \
213 |     && git checkout a1b6be1e75e8fe \
214 |     && cd ../.. \
215 |     && catkin build --no-status
216 | 
217 | # Install Ouster ROS packages
218 | RUN cd /home/$USER/catkin_ws/src \
219 |     && python3 -m pip install 'ouster-sdk[examples]==0.8.1' \
220 |     && git clone https://github.com/ouster-lidar/ouster-ros.git \
221 |     && cd ouster-ros \
222 |     && git checkout 208ee15a2a773d21194e3775d64a5b06d59e7310 \
223 |     && git submodule update --init --recursive \
224 |     && cd ../.. \
225 |     && catkin build --no-status
226 | 
227 | # Install FasterLIO
228 | RUN cd /home/$USER/catkin_ws/src \
229 |     && git clone https://github.com/k-chaney/faster-lio.git \
230 |     && cd faster-lio \
231 |     && git checkout abe2ccef26650bb66a454388e2bccaf648783e11 \
232 |     && catkin build --no-status
233 | 
234 | # Install InternImage following instructions from
235 | # https://github.com/OpenGVLab/InternImage/tree/master/segmentation
236 | RUN cd /home/$USER \
237 |     && wget -q https://repo.anaconda.com/archive/Anaconda3-2023.03-1-Linux-x86_64.sh \
238 |     && bash ./Anaconda3-2023.03-1-Linux-x86_64.sh -b -p /home/$USER/anaconda3 \
239 |     && git clone https://github.com/OpenGVLab/InternImage.git \
240 |     && cd InternImage \
241 |     && git checkout 631a5159e2c4e4bda16c732e64fa9584b38859ea \
242 |     && source /home/$USER/anaconda3/bin/activate \
243 |     && conda create -n internimage python=3.7 -y \
244 |     && conda activate internimage \
245 |     && pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html \
246 |     && pip install -U openmim \
247 |     && mim install mmcv-full==1.5.0 \
248 |     && pip install timm==0.6.11 mmdet==2.28.1 \
249 |     && pip install h5py mmsegmentation==0.27.0 \
250 |     && pip install opencv-python termcolor yacs pyyaml scipy
251 |     #&& cd ./segmentation/ops_dcnv3 \
252 |     #&& sh ./make.sh
253 | 
254 | # When running a container start in the home folder
255 | WORKDIR /home/$USER
256 | 
--------------------------------------------------------------------------------
/build_system/docker/docker_checks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eo pipefail
3 | 
4 | # M3ED Processing Preliminary Script
5 | # Run:
6 | #   ./docker_checks.sh <branch_name> <commit_hash>
7 | #
8 | # You can get the branch name with
9 | #   git rev-parse --abbrev-ref HEAD
10 | #
11 | # You can get the commit number with
12 | #   git rev-parse HEAD
13 | #
14 | # This script will check that the sample data exists in the local disk and the
15 | # ROS environment is set up
16 | 
17 | # Terminal colors
18 | export RED='\033[0;31m'
19 | export GREEN='\033[0;32m'
20 | export BOLD='\033[1m'
21 | export VIOLET='\033[0;35m'
22 | export BLUE='\033[0;34m'
23 | export 
NC='\033[0m' # No Color 24 | 25 | # Set the branch name and hash as arguments 26 | BRANCH=$1 27 | HASH=$2 28 | 29 | # Sample bag used to test that the dataset is loaded 30 | DATA_PATH=/M3ED_Build/input/raw_bags 31 | # Create array 32 | SAMPLE_BAGS=("car_urban_day_1_penno_small_loop.bag" "car_urban_day_1_horse.bag") 33 | 34 | # Fancy banner when the script starts 35 | echo -e "${VIOLET}${BOLD}------------------------------${NC}" 36 | echo -e "${VIOLET}${BOLD}M3ED Container and host checks${NC}" 37 | echo -e "${VIOLET}${BOLD}------------------------------${NC}" 38 | 39 | # ===================== PRELIMINARY CHECKS ===================== 40 | 41 | # Print the current date and time 42 | echo -e "${GREEN}Date: $(date)${NC}" 43 | 44 | # Print the current user 45 | echo -e "${GREEN}User: $(whoami)${NC}" 46 | # 47 | 48 | # Print the current branch name and hash name 49 | if [ -z "$BRANCH" ] 50 | then 51 | echo -e "${RED}Branch argument is NULL. Quitting.${NC}" 52 | exit 1 53 | fi 54 | echo -e "${GREEN}Branch: ${BRANCH}${NC}" 55 | if [ -z "$HASH" ] 56 | then 57 | echo -e "${RED}Hash argument is NULL. Quitting.${NC}" 58 | exit 1 59 | fi 60 | echo -e "${GREEN}Hash: ${HASH}${NC}" 61 | 62 | # Source ROS environment 63 | . ~/catkin_ws/devel/setup.bash 64 | RVERSION=$(rosversion -d) 65 | echo -e "${GREEN}ROS version: ${RVERSION}${NC}" 66 | 67 | # Check that the folder exists 68 | if [ ! -d "${DATA_PATH}" ] 69 | then 70 | echo -e "${RED}Data path ${DATA_PATH} not found. Is the NFS share mounted? Quitting.${NC}" 71 | exit 1 72 | else 73 | echo -e "${GREEN}Data path ${DATA_PATH} found.${NC}" 74 | fi 75 | 76 | # Check that each one of the sample bags exists 77 | for SAMPLE_BAG in "${SAMPLE_BAGS[@]}" 78 | do 79 | if [ ! -f "${DATA_PATH}/${SAMPLE_BAG}" ] 80 | then 81 | echo -e "${RED}Sample bag ${SAMPLE_BAG} not found. Quitting.${NC}" 82 | exit 1 83 | else 84 | echo -e "${GREEN}Sample bag ${SAMPLE_BAG} found.${NC}" 85 | fi 86 | done 87 | 88 | # Test if nvidia-smi is present and executable 89 | if ! 
command -v nvidia-smi &> /dev/null 90 | then 91 | echo -e "${RED}nvidia-smi could not be executed.${NC}" 92 | exit 1 93 | else 94 | echo -e "${GREEN}nvidia-smi found.${NC}" 95 | fi 96 | 97 | # Test for presence of GPUs 98 | gpu_count=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) 99 | if [ "$gpu_count" -gt 0 ] 100 | then 101 | echo -e "${GREEN}Valid GPUs detected: $gpu_count${NC}" 102 | else 103 | echo -e "${RED}No valid GPUs detected.${NC}" 104 | exit 1 105 | fi 106 | 107 | 108 | echo -e "${GREEN}${BOLD}Preliminary checks passed.\n${NC}" 109 | exit 0 110 | -------------------------------------------------------------------------------- /build_system/jenkins/Jenkinsfile_CPU_stage1: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | docker { 4 | image 'm3ed/m3ed:latest' 5 | args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' 6 | label 'CPU_node' 7 | } 8 | } 9 | parameters { 10 | string(name: 'CHECKOUT_COMMIT', defaultValue: '', 11 | description: 'Commit to checkout') 12 | string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') 13 | } 14 | options { 15 | ansiColor('xterm') 16 | } 17 | stages { 18 | stage('Checkout') { 19 | steps { 20 | sh "git checkout ${params.CHECKOUT_COMMIT}" 21 | } 22 | } 23 | stage('Verify Bag') { 24 | steps { 25 | sh "./build_system/bag_processing/rosbag2verify.bash ${params.BAG_NAME}" 26 | } 27 | } 28 | stage('Time Corrections') { 29 | steps { 30 | script { 31 | sh "./build_system/bag_processing/rosbag2timecorrection.bash ${params.BAG_NAME}" 32 | } 33 | } 34 | } 35 | stage('Parallel 1') { 36 | parallel { 37 | stage('HDF5') { 38 | steps { 39 | sh "./build_system/bag_processing/rosbag2hdf5.bash ${params.BAG_NAME}" 40 | } 41 | } 42 | stage('Ouster bag and Faster-LIO') { 43 | when { 44 | expression { !params.BAG_NAME.contains('calib') } 45 | } 46 | steps { 47 | sh "./build_system/lidar_depth/rosbag2pcd.bash ${params.BAG_NAME}" 48 | } 49 | } 50 | stage('Kalibr Cam') { 51 | when { 52 | expression { params.BAG_NAME.contains('camera_calib') } 53 | } 54 | steps { 55 | sh "./build_system/calibration/rosbag2camcalibration.bash ${params.BAG_NAME}" 56 | } 57 | } 58 | stage('Kalibr Imu') { 59 | when { 60 | expression { params.BAG_NAME.contains('imu_calib') } 61 | } 62 | steps { 63 | sh "./build_system/calibration/rosbag2imucalibration.bash ${params.BAG_NAME}" 64 | } 65 | } 66 | } 67 | } 68 | 69 | stage('Generate GT') { 70 | when { 71 | expression { !params.BAG_NAME.contains('calib') } 72 | } 73 | steps { 74 | sh "./build_system/lidar_depth/pcd2gt.bash ${params.BAG_NAME}" 75 | } 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /build_system/jenkins/Jenkinsfile_CPU_stage2: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | docker { 4 | image 'm3ed/m3ed:latest' 5 | args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' 6 | label 'CPU_node' 7 | } 8 | } 9 | parameters { 10 | string(name: 'CHECKOUT_COMMIT', defaultValue: '', 11 | description: 'Commit to checkout') 12 | string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') 13 | } 14 | options { 15 | ansiColor('xterm') 16 | } 17 | stages { 18 | stage('Checkout') { 19 | steps { 20 | sh "git checkout ${params.CHECKOUT_COMMIT}" 21 | } 22 | } 23 | stage('Parallel 1') { 24 | parallel { 25 | stage('Image Videos') { 26 | when { 27 | expression { !params.BAG_NAME.contains('calib') } 28 | } 29 | steps { 
30 | sh "./build_system/bag_processing/hdf52media.bash ${params.BAG_NAME}" 31 | } 32 | } 33 | stage('Depth Video') { 34 | when { 35 | expression { !params.BAG_NAME.contains('calib') } 36 | } 37 | steps { 38 | sh "./build_system/lidar_depth/gt2media.bash ${params.BAG_NAME}" 39 | } 40 | } 41 | } 42 | } 43 | 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /build_system/jenkins/Jenkinsfile_CPU_stage3: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | docker { 4 | image 'm3ed/m3ed:latest' 5 | args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' 6 | label 'CPU_node' 7 | } 8 | } 9 | parameters { 10 | string(name: 'CHECKOUT_COMMIT', defaultValue: '', 11 | description: 'Commit to checkout') 12 | string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') 13 | } 14 | options { 15 | ansiColor('xterm') 16 | } 17 | stages { 18 | stage('Checkout') { 19 | steps { 20 | sh "git checkout ${params.CHECKOUT_COMMIT}" 21 | } 22 | } 23 | stage('Check GT loop error') { 24 | steps { 25 | sh "./build_system/lidar_depth/gt2verify.bash ${params.BAG_NAME}" 26 | } 27 | } 28 | stage('Convert GT to EVO format') { 29 | steps { 30 | sh "./build_system/lidar_depth/gt2evo.bash ${params.BAG_NAME}" 31 | } 32 | } 33 | stage('Get Stats') { 34 | steps { 35 | sh "./build_system/stats_and_summary/getStats.bash ${params.BAG_NAME}" 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /build_system/jenkins/Jenkinsfile_GPU: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | docker { 4 | image 'm3ed/m3ed:latest' 5 | args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' 6 | label 'GPU_node' 7 | } 8 | } 9 | parameters { 10 | string(name: 'CHECKOUT_COMMIT', defaultValue: '', description: 'Commit to checkout') 11 | string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') 12 | string(name: 'GPU_TARGET', defaultValue: '', description: 'GPU to run the code') 13 | } 14 | options { 15 | ansiColor('xterm') 16 | } 17 | stages { 18 | stage('Checkout') { 19 | steps { 20 | sh "git checkout ${params.CHECKOUT_COMMIT}" 21 | } 22 | } 23 | stage ('InternImage') { 24 | steps { 25 | echo "GPU_TARGET: ${params.GPU_TARGET}" 26 | sh "./build_system/semantics/hdf52internimage.bash ${params.BAG_NAME} ${params.GPU_TARGET}" 27 | } 28 | } 29 | stage('Semantics Video') { 30 | steps { 31 | sh "./build_system/semantics/internimage2media.bash ${params.BAG_NAME}" 32 | } 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /build_system/jenkins/Jenkinsfile_dispatcher: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | environment { 4 | BAG_PATH = "/M3ED_Build/input/raw_bags" 5 | } 6 | 7 | stages { 8 | //// We first build the docker image in each of the hosts by executing the M3ED-docker-build pipeline 9 | //stage('Build Docker Image on Host and Test') { 10 | // steps { 11 | // script { 12 | // echo "Docker container build" 13 | // // Get list of all hosts with label 'runner' 14 | // def label = 'docker' 15 | // def nodesWithLabel = Jenkins.getInstance().getLabel(label).getNodes() 16 | // def nodeNamesWithLabel = nodesWithLabel.collect { node -> node.nodeName } 17 | // echo "Nodes with Label '${label}': ${nodeNamesWithLabel}" 18 | // for (int i =0; i < nodeNamesWithLabel.size(); 
i++) { 19 | // def nodeName = nodeNamesWithLabel[i] 20 | // build job: 'M3ED-docker-build', wait: true, parameters: [ 21 | // string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}") 22 | // ] 23 | // } 24 | // } 25 | // } 26 | //} 27 | stage('Set Files for Other Branches') { 28 | // Only run when branch is not main or origin/main 29 | when { 30 | not { 31 | expression { 32 | return env.GIT_BRANCH == "origin/main" 33 | } 34 | } 35 | } 36 | steps { 37 | script { 38 | env.DATA_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data --short", returnStdout: true).trim() 39 | env.DATA_SEMANTICS_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data_semantics --short", returnStdout: true).trim() 40 | env.IMU_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files imu_calib --short", returnStdout: true).trim() 41 | env.CAMERA_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files camera_calib --short", returnStdout: true).trim() 42 | } 43 | } 44 | } 45 | stage('Set Files for main Branch') { 46 | // Only run when branch is origin/main 47 | when { 48 | expression { 49 | return env.GIT_BRANCH == "origin/main" 50 | } 51 | } 52 | steps { 53 | script { 54 | env.DATA_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data", returnStdout: true).trim() 55 | env.DATA_SEMANTICS_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data_semantics", returnStdout: true).trim() 56 | env.IMU_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files imu_calib", returnStdout: true).trim() 57 | env.CAMERA_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files camera_calib", returnStdout: true).trim() 58 | } 59 | } 60 | } 61 | stage('CPU Stage 1') { 62 | steps { 63 | script { 64 | // Process "calib" files first, order is defined by allFiles order 65 | def camera_calib_files = env.CAMERA_CALIB_FILES.split(',') 66 | def imu_calib_files = env.IMU_CALIB_FILES.split(',') 67 | def dataFiles = env.DATA_FILES.split(',') 68 | def allFiles = [camera_calib_files, imu_calib_files, dataFiles] 69 | 70 | echo "CPU Stage 1" 71 | for (int f = 0; f < 3; f++) { 72 | files = allFiles[f] 73 | // Dispatch files to nodes 74 | // https://www.jenkins.io/doc/pipeline/examples/#jobs-in-parallel 75 | def builds = [:] 76 | def statuses = [:] 77 | for (int i = 0; i < files.size(); i++) { 78 | def index = i 79 | def file = files[index] 80 | builds[file] = { 81 | // Avoid issues with wait period 82 | def status = build job: 'M3ED-CPU-Stage1', wait: true, parameters: [ 83 | string(name: 'BAG_NAME', value: file), 84 | string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"), 85 | string(name: 'dummy', value: "${index}")], quietPeriod: 2 86 | statuses[file]=status 87 | } 88 | } 89 | parallel builds 90 | sleep 30 // Kill issues with container 91 | for (int i=0; i < files.size(); i++) { 92 | def index = i 93 | def file = files[index] 94 | if (statuses[file].result != 'SUCCESS') { 95 | error "Pipeline ${file} failed with status ${statuses[file].result}" 96 | } 97 | } 98 | } 99 | } 100 | } 101 | } 102 | stage('Parallel') { 103 | parallel { 104 | stage('GPU') { 105 | steps { 106 | script { 107 | echo "GPU" 108 | def sem_files = env.DATA_SEMANTICS_FILES.split(',') 109 | def par_jobs = 16 110 | def num_gpu = 4 111 | def N = sem_files.size() 112 | def builds = [:] 113 | def statuses = [:] 114 | 115 | for (int i = 0; i < N; i+= par_jobs) { 116 | builds.clear() 117 | statuses.clear() 118 | for (int j = 
i; j < i + par_jobs && j < N; j++) {
119 | def index = j
120 | def file = sem_files[index]
121 | def GPU = j % num_gpu
122 | builds[file] = {
123 | // Avoid issues with wait period
124 | def status = build job: 'M3ED-GPU', wait: true, parameters: [
125 | string(name: 'BAG_NAME', value: file),
126 | string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
127 | string(name: 'GPU_TARGET', value: "${GPU}"),
128 | string(name: 'dummy', value: "${index}")], quietPeriod: 2
129 | statuses[file]=status
130 | }
131 | }
132 | parallel builds
133 | for (int j = i; j < i + par_jobs && j < N; j++) {
134 | def index = j
135 | def file = sem_files[index]
136 | if (statuses[file].result != 'SUCCESS') {
137 | error "Pipeline ${file} failed with status ${statuses[file].result}"
138 | }
139 | }
140 | }
141 | }
142 | }
143 | }
144 | stage('CPU Stage 2') {
145 | steps {
146 | script {
147 | echo "CPU Stage 2"
148 | def dataFiles = env.DATA_FILES.split(',')
149 | 
150 | def builds = [:]
151 | def statuses = [:]
152 | for (int i = 0; i < dataFiles.size(); i++) {
153 | def index = i
154 | def file = dataFiles[index]
155 | builds[file] = {
156 | // Avoid issues with wait period
157 | def status = build job: 'M3ED-CPU-Stage2', wait: true, parameters: [
158 | string(name: 'BAG_NAME', value: file),
159 | string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
160 | string(name: 'dummy', value: "${index}")], quietPeriod: 2
161 | statuses[file]=status
162 | }
163 | }
164 | parallel builds
165 | for (int i = 0; i < dataFiles.size(); i++) {
166 | def index = i
167 | def file = dataFiles[index]
168 | if (statuses[file].result != 'SUCCESS') {
169 | error "Pipeline ${file} failed with status ${statuses[file].result}"
170 | }
171 | }
172 | }
173 | }
174 | }
175 | }
176 | }
177 | stage('CPU Stage 3') {
178 | steps {
179 | script {
180 | echo "CPU Stage 3"
181 | def dataFiles = env.DATA_FILES.split(',')
182 | def builds = [:]
183 | def statuses = [:]
184 | for (int i = 0; i < dataFiles.size(); i++) {
185 | def index = i
186 | def file = dataFiles[index]
187 | builds[file] = {
188 | // Avoid issues with wait period
189 | def status = build job: 'M3ED-CPU-Stage3', wait: true, parameters: [
190 | string(name: 'BAG_NAME', value: file),
191 | string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
192 | string(name: 'dummy', value: "${index}")], quietPeriod: 2
193 | statuses[file]=status
194 | }
195 | }
196 | parallel builds
197 | for (int i = 0; i < dataFiles.size(); i++) {
198 | def index = i
199 | def file = dataFiles[index]
200 | if (statuses[file].result != 'SUCCESS') {
201 | error "Pipeline ${file} failed with status ${statuses[file].result}"
202 | }
203 | }
204 | }
205 | }
206 | }
207 | }
208 | }
209 | 
210 | 
--------------------------------------------------------------------------------
/build_system/jenkins/Jenkinsfile_docker_build:
--------------------------------------------------------------------------------
1 | def user_id
2 | def user_name
3 | node {
4 | user_id = sh(returnStdout: true, script: 'id -u').trim()
5 | user_name = sh(returnStdout: true, script: 'whoami').trim()
6 | }
7 | 
8 | pipeline {
9 | agent {
10 | dockerfile {
11 | dir 'build_system/docker'
12 | additionalBuildArgs "--build-arg user_id=${user_id} --build-arg user_name=${user_name} -t m3ed/m3ed:latest"
13 | args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all'
14 | 
15 | }
16 | }
17 | parameters {
18 | string(name: 'CHECKOUT_COMMIT', defaultValue: '',
19 | description: 'Commit to checkout')
20 | }
21 | options {
22 | ansiColor('xterm')
23 | }
24 | stages {
25 | stage('Checkout') {
26 | steps {
27 | sh "git checkout ${params.CHECKOUT_COMMIT}"
28 | }
29 | }
30 | stage ('Check System') {
31 | steps {
32 | script {
33 | def branch = sh(returnStdout: true, script: 'git rev-parse --abbrev-ref HEAD').trim()
34 | def hash = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
35 | sh "./build_system/docker/docker_checks.sh ${branch} ${hash}"
36 | }
37 | }
38 | }
39 | }
40 | }
41 | 
--------------------------------------------------------------------------------
/build_system/lidar_depth/concatenate_pcd_uncompressed/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(concatenate_pcd_uncompressed)
3 | 
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 | 
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 |   roscpp
12 |   pcl_ros
13 | )
14 | 
15 | ## System dependencies are found with CMake's conventions
16 | # find_package(Boost REQUIRED COMPONENTS system)
17 | 
18 | 
19 | ## Uncomment this if the package has a setup.py. This macro ensures
20 | ## modules and global scripts declared therein get installed
21 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
22 | # catkin_python_setup()
23 | 
24 | ################################################
25 | ## Declare ROS messages, services and actions ##
26 | ################################################
27 | 
28 | ## To declare and build messages, services or actions from within this
29 | ## package, follow these steps:
30 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
31 | ##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
32 | ## * In the file package.xml:
33 | ##   * add a build_depend tag for "message_generation"
34 | ##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
35 | ##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
36 | ##     but can be declared for certainty nonetheless:
37 | ##     * add a exec_depend tag for "message_runtime"
38 | ## * In this file (CMakeLists.txt):
39 | ##   * add "message_generation" and every package in MSG_DEP_SET to
40 | ##     find_package(catkin REQUIRED COMPONENTS ...)
41 | ##   * add "message_runtime" and every package in MSG_DEP_SET to
42 | ##     catkin_package(CATKIN_DEPENDS ...)
43 | ##   * uncomment the add_*_files sections below as needed
44 | ##     and list every .msg/.srv/.action file to be processed
45 | ##   * uncomment the generate_messages entry below
46 | ##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
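## NOTE: this package wraps PCL's point cloud concatenation and declares no
## messages, services, or actions, so the message/service template sections
## below are left commented out as generated by catkin_create_pkg.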
47 | 48 | ## Generate messages in the 'msg' folder 49 | # add_message_files( 50 | # FILES 51 | # Message1.msg 52 | # Message2.msg 53 | # ) 54 | 55 | ## Generate services in the 'srv' folder 56 | # add_service_files( 57 | # FILES 58 | # Service1.srv 59 | # Service2.srv 60 | # ) 61 | 62 | ## Generate actions in the 'action' folder 63 | # add_action_files( 64 | # FILES 65 | # Action1.action 66 | # Action2.action 67 | # ) 68 | 69 | ## Generate added messages and services with any dependencies listed here 70 | # generate_messages( 71 | # DEPENDENCIES 72 | # std_msgs # Or other packages containing msgs 73 | # ) 74 | 75 | ################################################ 76 | ## Declare ROS dynamic reconfigure parameters ## 77 | ################################################ 78 | 79 | ## To declare and build dynamic reconfigure parameters within this 80 | ## package, follow these steps: 81 | ## * In the file package.xml: 82 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 83 | ## * In this file (CMakeLists.txt): 84 | ## * add "dynamic_reconfigure" to 85 | ## find_package(catkin REQUIRED COMPONENTS ...) 86 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 87 | ## and list every .cfg file to be processed 88 | 89 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 90 | # generate_dynamic_reconfigure_options( 91 | # cfg/DynReconf1.cfg 92 | # cfg/DynReconf2.cfg 93 | # ) 94 | 95 | ################################### 96 | ## catkin specific configuration ## 97 | ################################### 98 | ## The catkin_package macro generates cmake config files for your package 99 | ## Declare things to be passed to dependent projects 100 | ## INCLUDE_DIRS: uncomment this if your package contains header files 101 | ## LIBRARIES: libraries you create in this project that dependent projects also need 102 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 103 | ## DEPENDS: system dependencies of this project that dependent projects also need 104 | catkin_package( 105 | INCLUDE_DIRS 106 | LIBRARIES concatenate_pcd_uncompressed 107 | CATKIN_DEPENDS roscpp 108 | DEPENDS roscpp pcl_ros pcl_conversions 109 | ) 110 | 111 | ########### 112 | ## Build ## 113 | ########### 114 | 115 | ## Specify additional locations of header files 116 | ## Your package locations should be listed before other locations 117 | include_directories( 118 | ${catkin_INCLUDE_DIRS} 119 | ) 120 | 121 | ## Declare a C++ library 122 | # add_library(${PROJECT_NAME} 123 | # src/${PROJECT_NAME}/concatenate_pcd_uncompressed.cpp 124 | # ) 125 | 126 | ## Add cmake target dependencies of the library 127 | ## as an example, code may need to be generated before libraries 128 | ## either from message generation or dynamic reconfigure 129 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 130 | 131 | ## Declare a C++ executable 132 | ## With catkin_make all packages are built within a single CMake context 133 | ## The recommended prefix ensures that target names across packages don't collide 134 | add_executable(${PROJECT_NAME} src/pcl_concatenate_points_pcd.cpp) 135 | 136 | ## Rename C++ executable without prefix 137 | ## The above recommended prefix causes long target names, the following renames the 138 | ## target back to the shorter version for ease of user use 139 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 140 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 141 | 142 | ## Add cmake target dependencies of the executable 143 | ## same as for the library above 144 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 145 | 146 | ## Specify libraries to link a library or executable target against 147 | target_link_libraries(${PROJECT_NAME} 148 | ${catkin_LIBRARIES} 149 | ) 150 | 151 | ############# 152 | ## Install ## 153 | ############# 154 | 155 | # all install targets should use catkin DESTINATION variables 156 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 157 | 158 | ## Mark executable scripts (Python etc.) for installation 159 | ## in contrast to setup.py, you can choose the destination 160 | # catkin_install_python(PROGRAMS 161 | # scripts/my_python_script 162 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 163 | # ) 164 | 165 | ## Mark executables for installation 166 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 167 | # install(TARGETS ${PROJECT_NAME}_node 168 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 169 | # ) 170 | 171 | ## Mark libraries for installation 172 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 173 | # install(TARGETS ${PROJECT_NAME} 174 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 175 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 176 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 177 | # ) 178 | 179 | ## Mark cpp header files for installation 180 | # install(DIRECTORY include/${PROJECT_NAME}/ 181 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 182 | # FILES_MATCHING PATTERN "*.h" 183 | # PATTERN ".svn" EXCLUDE 184 | # ) 185 | 186 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
187 | # install(FILES
188 | #   # myfile1
189 | #   # myfile2
190 | #   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
191 | # )
192 | 
193 | #############
194 | ## Testing ##
195 | #############
196 | 
197 | ## Add gtest based cpp test target and link libraries
198 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_concatenate_pcd_uncompressed.cpp)
199 | # if(TARGET ${PROJECT_NAME}-test)
200 | #   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
201 | # endif()
202 | 
203 | ## Add folders to be run by python nosetests
204 | # catkin_add_nosetests(test)
205 | 
--------------------------------------------------------------------------------
/build_system/lidar_depth/concatenate_pcd_uncompressed/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>concatenate_pcd_uncompressed</name>
4 |   <version>0.0.0</version>
5 |   <description>The concatenate_pcd_uncompressed package</description>
6 | 
7 |   <maintainer email="fclad@todo.todo">fclad</maintainer>
8 | 
9 |   <license>TODO</license>
10 | 
11 |   <buildtool_depend>catkin</buildtool_depend>
12 |   <build_depend>roscpp</build_depend>
13 |   <build_depend>pcl_ros</build_depend>
14 |   <build_depend>pcl_conversions</build_depend>
15 |   <build_export_depend>roscpp</build_export_depend>
16 |   <exec_depend>roscpp</exec_depend>
17 |   <exec_depend>pcl_ros</exec_depend>
18 |   <exec_depend>pcl_conversions</exec_depend>
19 | 
20 |   <export>
21 |   </export>
22 | </package>
23 | 
--------------------------------------------------------------------------------
/build_system/lidar_depth/concatenate_pcd_uncompressed/src/pcl_concatenate_points_pcd.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |  * Software License Agreement (BSD License)
3 |  *
4 |  *  Point Cloud Library (PCL) - www.pointclouds.org
5 |  *  Copyright (c) 2010, Willow Garage, Inc.
6 |  *  Copyright (c) 2012-, Open Perception, Inc.
7 |  *
8 |  *  All rights reserved.
9 |  *
10 |  *  Redistribution and use in source and binary forms, with or without
11 |  *  modification, are permitted provided that the following conditions
12 |  *  are met:
13 |  *
14 |  *   * Redistributions of source code must retain the above copyright
15 |  *     notice, this list of conditions and the following disclaimer.
16 |  *   * Redistributions in binary form must reproduce the above
17 |  *     copyright notice, this list of conditions and the following
18 |  *     disclaimer in the documentation and/or other materials provided
19 |  *     with the distribution.
20 |  *   * Neither the name of the copyright holder(s) nor the names of its
21 |  *     contributors may be used to endorse or promote products derived
22 |  *     from this software without specific prior written permission.
23 |  *
24 |  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 |  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 |  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 |  *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 |  *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 |  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 |  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 |  *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 |  *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 |  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 |  *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 |  *  POSSIBILITY OF SUCH DAMAGE.
36 |  *
37 |  * $Id$
38 |  *
39 |  */
40 | 
41 | /**
42 | 
43 | @b pcd_concatenate_points exemplifies how to concatenate the points of two PointClouds having the same fields.
44 | 
45 | **/
46 | 
47 | #include <pcl/io/pcd_io.h>
48 | #include <pcl/console/print.h>
49 | #include <pcl/console/time.h>
50 | 
51 | Eigen::Vector4f translation;
52 | Eigen::Quaternionf orientation;
53 | 
54 | ////////////////////////////////////////////////////////////////////////////////
55 | /** \brief Parse command line arguments for file names.
56 |   * Returns: a vector with file names indices.
57 |   * \param argc
58 |   * \param argv
59 |   * \param extension
60 |   */
61 | std::vector<int>
62 | parseFileExtensionArgument (int argc, char** argv, std::string extension)
63 | {
64 |   std::vector<int> indices;
65 |   for (int i = 1; i < argc; ++i)
66 |   {
67 |     std::string fname = std::string (argv[i]);
68 | 
69 |     // Needs to be at least 4: .ext
70 |     if (fname.size () <= 4)
71 |       continue;
72 | 
73 |     // For being case insensitive
74 |     std::transform (fname.begin (), fname.end (), fname.begin (), tolower);
75 |     std::transform (extension.begin (), extension.end (), extension.begin (), tolower);
76 | 
77 |     // Check if found
78 |     std::string::size_type it;
79 |     if ((it = fname.find (extension)) != std::string::npos)
80 |     {
81 |       // Additional check: we want to be able to differentiate between .p and .png
82 |       if ((extension.size () - (fname.size () - it)) == 0)
83 |         indices.push_back (i);
84 |     }
85 |   }
86 |   return (indices);
87 | }
88 | 
89 | bool
90 | loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud)
91 | {
92 |   using namespace pcl::console;
93 |   TicToc tt;
94 |   print_highlight ("Loading "); print_value ("%s ", filename.c_str ());
95 | 
96 |   tt.tic ();
97 |   if (pcl::io::loadPCDFile (filename, cloud, translation, orientation) < 0)
98 |     return (false);
99 |   print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", cloud.width * cloud.height); print_info (" points]\n");
100 |   print_info ("Available dimensions: "); print_value ("%s\n", pcl::getFieldsList (cloud).c_str ());
101 | 
102 |   return (true);
103 | }
104 | 
105 | void
106 | saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output)
107 | {
108 |   using namespace pcl::console;
109 |   TicToc tt;
110 |   tt.tic ();
111 | 
112 |   print_highlight ("Saving "); print_value ("%s ", filename.c_str ());
113 | 
114 |   pcl::PCDWriter w;
115 |   w.writeBinary (filename, output, translation, orientation);
116 | 
117 |   print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", output.width * output.height); print_info (" points]\n");
118 | }
119 | 
120 | 
121 | /* ---[ */
122 | int
123 | main (int argc, char** argv)
124 | {
125 |   if (argc < 2)
126 |   {
127 |     std::cerr << "Syntax is: " << argv[0] << " <filename 1..N.pcd>" << std::endl;
128 |     std::cerr << "Result will be saved to output.pcd" << std::endl;
129 |     return (-1);
130 |   }
131 | 
132 |   std::vector<int> file_indices = parseFileExtensionArgument (argc, argv, ".pcd");
133 | 
134 |   //pcl::PointCloud<pcl::PointXYZ> cloud_all;
135 |   pcl::PCLPointCloud2 cloud_all;
136 |   for (const int &file_index : file_indices)
137 |   {
138 |     // Load the Point Cloud
139 |     pcl::PCLPointCloud2 cloud;
140 |     loadCloud (argv[file_index], cloud);
141 |     //pcl::PointCloud<pcl::PointXYZ> cloud;
142 |     //pcl::io::loadPCDFile (argv[file_indices[i]], cloud);
143 |     //cloud_all += cloud;
144 |     pcl::concatenate (cloud_all, cloud, cloud_all);
145 |     PCL_INFO ("Total number of points so far: %u. 
Total data size: %lu bytes.\n", cloud_all.width * cloud_all.height, cloud_all.data.size ()); 146 | } 147 | 148 | saveCloud ("output.pcd", cloud_all); 149 | 150 | return (0); 151 | } 152 | /* ]--- */ 153 | -------------------------------------------------------------------------------- /build_system/lidar_depth/fasterlio_gt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pdb 3 | from functools import reduce 4 | import multiprocessing 5 | import subprocess 6 | from datetime import datetime 7 | 8 | import argparse 9 | 10 | import h5py 11 | import numpy as np 12 | import open3d as o3d 13 | import pandas as pd 14 | import cv2 15 | import matplotlib.pyplot as plt 16 | from scipy.spatial.transform import Rotation 17 | 18 | from tqdm import tqdm 19 | 20 | from util import transform_inv, merge_pcs, load_clouds, load_trajectory, filter_global_cloud 21 | 22 | def get_view(L0_points, camera_location, camera_calib): 23 | 24 | scan_aggregation = camera_calib["scan_aggregation"] 25 | 26 | assert isinstance(scan_aggregation, int) and scan_aggregation >= 4 and scan_aggregation <= 400 27 | 28 | # print(f" - scan_aggregation: {scan_aggregation}") 29 | 30 | cam_idx = camera_location['idx'] 31 | start_idx = max(0,cam_idx-scan_aggregation) 32 | stop_idx = min(cam_idx+scan_aggregation, len(L0_points)) 33 | 34 | L0_cloud = o3d.geometry.PointCloud() 35 | L0_cloud.points = o3d.utility.Vector3dVector(np.concatenate( L0_points[start_idx:stop_idx] )) 36 | 37 | Ln_cloud = L0_cloud.transform(camera_location['Ln_T_L0']) 38 | Cn_cloud = Ln_cloud.transform(camera_calib["Cn_T_lidar"]) 39 | 40 | z_limit = 250 41 | camera_fov = 64/180*np.pi # The ec FoV is 62, but we are taking a bit more 42 | xy_limit = z_limit * np.tan(camera_fov/2) 43 | 44 | bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-xy_limit, -xy_limit, 0.05), 45 | max_bound=(xy_limit, xy_limit, z_limit)) 46 | Cn_cloud_crop = Cn_cloud.crop(bbox) 47 | 48 | diameter = np.linalg.norm(np.array(Cn_cloud_crop.get_max_bound()) - 49 | np.array(Cn_cloud_crop.get_min_bound())) 50 | # https://github.com/isl-org/Open3D/blob/ff22900c958d0216c85305ee8b3841e5699f9d57/examples/python/geometry/point_cloud_hidden_point_removal.py#L24C4-L24C4 51 | hpr_radius = diameter * 100 52 | 53 | mesh, pt_map = Cn_cloud_crop.hidden_point_removal(np.zeros((3,)), hpr_radius) 54 | Cn_cloud_HPR = Cn_cloud_crop.select_by_index(pt_map) 55 | 56 | camera_points = np.array(Cn_cloud_HPR.points) 57 | 58 | rvecs = np.zeros((3,1)) # cv2.Rodrigues(np.eye(3))[0] 59 | tvecs = np.zeros((3,1)) 60 | 61 | imgpts, _ = cv2.projectPoints(camera_points, rvecs, tvecs, 62 | camera_calib['K'], camera_calib['D']) 63 | 64 | width, height = camera_calib['resolution'] 65 | 66 | imgpts = imgpts[:,0,:] 67 | valid_points = (imgpts[:, 1] >= 0) & (imgpts[:, 1] < height) & \ 68 | (imgpts[:, 0] >= 0) & (imgpts[:, 0] < width) 69 | imgpts = imgpts[valid_points,:] 70 | depth = camera_points[valid_points,2] 71 | 72 | idx_sorted = np.argsort(depth) 73 | 74 | depth_sorted = depth[idx_sorted] 75 | imgpts_sorted = imgpts[idx_sorted,:] 76 | 77 | images = [] 78 | 79 | scales = 4 80 | 81 | for i in range(scales): 82 | images = [np.repeat(img,2,axis=1).repeat(2,axis=0) for img in images] 83 | 84 | cscale = 2 ** (scales - i - 1) 85 | image = np.zeros([height // cscale, width // cscale]) + np.inf 86 | image[imgpts[:,1].astype(int) // cscale, imgpts[:,0].astype(int) // cscale] = depth 87 | images.append(image) 88 | 89 | image[ image==0.0 ] = np.inf 90 | 91 | return image 92 | 93 | 
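# get_view (above) renders a dense depth image for one camera pose: it
# aggregates the LiDAR scans around the pose, transforms the merged cloud
# into the camera frame, crops it to a padded camera frustum, removes
# occluded points with Open3D's hidden-point removal, projects the rest
# with cv2.projectPoints, and rasterizes the projected depths coarse-to-fine
# (pixels with no LiDAR return stay at np.inf).
# get_view_mp below unpacks the argument tuple so the renderer can be
# dispatched through multiprocessing.Pool.imap, which passes each worker a
# single picklable argument.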
def get_view_mp(mp_tuple): 94 | return get_view(*mp_tuple) 95 | 96 | def index_map(event_t_ds, index_map_ds, time_ds): 97 | index_map_ds_cl = 0 98 | 99 | remaining_times = time_ds[...].copy() 100 | cur_loc = 0 101 | chunk_size = 10000000 102 | num_events = event_t_ds.shape[0] 103 | 104 | while remaining_times.shape[0] > 0 and cur_loc < num_events: 105 | end = min( num_events, cur_loc+chunk_size ) 106 | idx = cur_loc + np.searchsorted(event_t_ds[cur_loc:end], remaining_times) 107 | 108 | idx_remaining = (idx == end) 109 | idx_found = (idx < end) 110 | 111 | index_map_ds[index_map_ds_cl:index_map_ds_cl+idx_found.sum()] = idx[idx_found] 112 | 113 | remaining_times = remaining_times[idx_remaining] 114 | cur_loc = cur_loc + chunk_size 115 | index_map_ds_cl += idx_found.sum() 116 | 117 | def create_h5(h5fn, flio_trajectory, Cn_T_lidar, topic): 118 | Ln_T_cam = transform_inv(Cn_T_lidar) 119 | number_samples = len(flio_trajectory) 120 | 121 | h5file = h5py.File(h5fn, 'w') 122 | # Add metadata 123 | version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() 124 | h5file.attrs["version"] = version 125 | h5file.attrs["creation_date"] = str(datetime.now()) 126 | 127 | lidar_trajectory = h5file.create_dataset("Ln_T_L0", (number_samples,4,4), dtype='f8') 128 | cam_trajectory = h5file.create_dataset("Cn_T_C0", (number_samples,4,4), dtype='f8') 129 | h5file.attrs['cam_name'] = topic 130 | times = h5file.create_dataset("ts", (number_samples,), dtype='i8') 131 | 132 | for i, ft in enumerate(flio_trajectory): 133 | lidar_trajectory[i] = ft['Ln_T_L0'] 134 | cam_trajectory[i] = Cn_T_lidar @ ft['Ln_T_L0'] @ Ln_T_cam 135 | times[i] = ft['timestamp'] 136 | 137 | return h5file, number_samples 138 | 139 | def save_depth_to_h5(h5file, num_samples, topic, map_data, resolution, num_mp=1, 140 | verbose=False): 141 | depth = h5file.create_group("depth") 142 | 143 | topic = "/".join( topic.split("/")[0:3] ) 144 | 145 | depth_shape = (num_samples,resolution[1],resolution[0]) 146 | chunk_shape = (1,resolution[1],resolution[0]) 147 | topic_ds = depth.create_dataset(topic[1:], depth_shape, dtype='f4', chunks=chunk_shape, compression='lzf' ) 148 | 149 | if num_mp == 1: 150 | for i, packed_data in tqdm(enumerate(map_data), total=num_samples, 151 | disable=not verbose): 152 | topic_ds[i,...] = get_view_mp(packed_data) 153 | else: 154 | pool = multiprocessing.Pool(processes=num_mp) 155 | 156 | pool_iter = pool.imap(get_view_mp, map_data) 157 | 158 | for i, img in tqdm(enumerate(pool_iter), total=num_samples, 159 | disable=not verbose): 160 | topic_ds[i,...] 
= img 161 | 162 | pool.close() 163 | 164 | def check_trajectory(h5file, Cn_T_lidar): 165 | frames_a = [o3d.geometry.TriangleMesh.create_coordinate_frame()] 166 | frames_a_record = [] 167 | frames_b = [] 168 | frames_b_record = [] 169 | for i in range(h5file['Ln_T_L0'].shape[0]): 170 | frames_a_record.append( transform_inv(h5file['Ln_T_L0'][i] @ transform_inv(Cn_T_lidar) ) ) 171 | frames_a.append(o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1).transform(frames_a_record[-1] ) ) 172 | frames_a[-1].paint_uniform_color( np.array( [1.0, 0.647, 0.0] )) 173 | 174 | for i in range(h5file['Cn_T_C0'].shape[0]): 175 | frames_b_record.append( transform_inv(h5file['Cn_T_C0'][i]) ) 176 | frames_b.append(o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1).transform( frames_b_record[-1] )) 177 | o3d.visualization.draw_geometries( frames_a + frames_b ) 178 | 179 | diff = [] 180 | for a, b in zip(frames_a_record, frames_b_record): 181 | diff.append( transform_inv(a) @ b ) 182 | 183 | 184 | if __name__ == "__main__": 185 | parser = argparse.ArgumentParser() 186 | parser.add_argument("--pcd_folder", required=True, 187 | help="Dir containing the pcd") 188 | 189 | parser.add_argument("--traj_fn", required=True, 190 | help="Trajectory file name") 191 | 192 | parser.add_argument("--timesync_fn", required=True, 193 | help="Time synchronization file") 194 | 195 | parser.add_argument("--calib_h5", required=True, 196 | help="LiDAR calibration h5 file") 197 | 198 | parser.add_argument("--pose_h5_fn", required=True, 199 | help="h5 file name") 200 | 201 | parser.add_argument("--depth_h5_fn", 202 | help="h5 file name") 203 | 204 | parser.add_argument("--scan_aggregation", required=True, 205 | help="scan aggregation") 206 | 207 | parser.add_argument("--cam", default="/prophesee/left", 208 | help="Reference cam") 209 | 210 | parser.add_argument("--num_mp", default=1, type=int, 211 | help="Number of processes. Default is 1") 212 | 213 | parser.add_argument("--skip", default=1, type=int, 214 | help="Number of frames to skip. 
Default is 1") 215 | 216 | parser.add_argument("--only_traj", action='store_true', 217 | help="If set, only trajectory will be processed") 218 | parser.add_argument("--verbose", action='store_true', 219 | help="If set, will print progress") 220 | 221 | args = parser.parse_args() 222 | 223 | import yaml 224 | with open(args.timesync_fn, 'r') as f: 225 | timesync = yaml.safe_load(f) 226 | 227 | print("Loading trajectory") 228 | skip = args.skip 229 | flio_trajectory = load_trajectory(args.traj_fn, timesync)[::skip] 230 | 231 | print("Loading point clouds") 232 | clouds = load_clouds(args.pcd_folder) 233 | print("Filtering point clouds") 234 | clouds = filter_global_cloud(clouds) 235 | 236 | print("Loading calibs") 237 | calib_h5 = h5py.File(args.calib_h5, 'r') 238 | cam_calib = calib_h5[args.cam + "/calib"] 239 | 240 | prophesee_left_T_cam = calib_h5[args.cam + "/calib/T_to_prophesee_left"] 241 | prophesee_left_T_lidar = calib_h5["/ouster/calib/T_to_prophesee_left"] 242 | 243 | Cn_T_lidar = transform_inv(prophesee_left_T_cam) @ prophesee_left_T_lidar 244 | 245 | scan_aggregation = int(args.scan_aggregation) 246 | 247 | camera_calib = {"D": cam_calib['distortion_coeffs'][...], 248 | "K": np.eye(3), 249 | "model": cam_calib['distortion_model'][...], 250 | "Cn_T_lidar": Cn_T_lidar, 251 | "resolution": cam_calib['resolution'][...], 252 | "scan_aggregation": scan_aggregation} 253 | 254 | camera_calib['K'][0, 0] = cam_calib['intrinsics'][0] 255 | camera_calib['K'][1, 1] = cam_calib['intrinsics'][1] 256 | camera_calib['K'][0, 2] = cam_calib['intrinsics'][2] 257 | camera_calib['K'][1, 2] = cam_calib['intrinsics'][3] 258 | 259 | print("Creating h5 file") 260 | h5file, nsamples = create_h5(args.pose_h5_fn, flio_trajectory, 261 | Cn_T_lidar, args.cam) 262 | check_trajectory(h5file, Cn_T_lidar) 263 | 264 | print("Computing time maps") 265 | time_map = h5file.create_dataset("ts_map" + args.cam.replace("/", '_'), 266 | h5file['ts'].shape, dtype='u8') 267 | index_map(calib_h5[args.cam+"/t"], time_map, h5file['ts']) 268 | h5file.close() 269 | 270 | if not args.only_traj: 271 | print("Starting depth image generation") 272 | map_data = [(clouds, t, camera_calib) for t in flio_trajectory] 273 | 274 | # Create depth h5 file and copy the contents from the trajectory h5 275 | print("Copying pose file into depth") 276 | depth_h5 = h5py.File(args.depth_h5_fn, 'w') 277 | with h5py.File(args.pose_h5_fn, 'r') as f: 278 | for k in f: 279 | f.copy(k, depth_h5) 280 | 281 | print("Processing depth to h5") 282 | # Add metadata 283 | version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() 284 | depth_h5.attrs["version"] = version 285 | depth_h5.attrs["creation_date"] = str(datetime.now()) 286 | save_depth_to_h5(depth_h5, nsamples, args.cam, 287 | map_data, cam_calib['resolution'], args.num_mp, 288 | args.verbose) 289 | depth_h5.close() 290 | -------------------------------------------------------------------------------- /build_system/lidar_depth/fasterlio_plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import h5py 4 | import pdb 5 | import os 6 | import numpy as np 7 | import cv2 8 | from scipy.spatial.transform import Rotation 9 | 10 | from ouster.sdk.examples.open3d import viewer_3d 11 | from ouster.client import LidarPacket, SensorInfo, Scans, Packets, ChanField, XYZLut, _utils 12 | import open3d as o3d 13 | 14 | from util import kalibr_to_opencv, get_cloud, load_clouds, load_trajectory, transform_inv, 
filter_global_cloud
15 | 
16 | def get_info_via_fasterlio(args):
17 |     exp_name = os.path.basename(args.tmp_folder)
18 | 
19 |     import yaml
20 |     with open(args.tmp_folder+"/"+exp_name+"_time_corrections.yaml", 'r') as f:
21 |         timesync = yaml.safe_load(f)
22 | 
23 |     traj_file = args.tmp_folder+"/"+exp_name+".traj"
24 |     traj = load_trajectory(traj_file, timesync)
25 |     times = np.array([t['timestamp'] for t in traj])
26 |     orig_times = np.array([t['orig_ts'] for t in traj])
27 | 
28 | 
29 |     inds = list(range(traj[0]['idx'], traj[-1]['idx'], 20))
30 |     print(inds)
31 | 
32 |     cloud = o3d.geometry.PointCloud()
33 |     points = [load_clouds(args.out_folder+"/local_scans", idx) for idx in inds]
34 |     # points = filter_global_cloud(points, method='dbscan', method_kwargs={"eps":0.10, "min_points":10})
35 |     points = np.concatenate(points)
36 |     cloud.points = o3d.utility.Vector3dVector(points)
37 |     cloud.paint_uniform_color([1, 0.706, 0])
38 |     cloud.estimate_normals()
39 | 
40 |     frames = []
41 |     for idx in inds:
42 |         coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0).transform( traj[idx]['L0_T_lidar'] )
43 |         frames.append(coord)
44 | 
45 |     # points = (points @ traj_pose['Ln_T_L0'][:3,:3].T) + traj_pose['Ln_T_L0'][:3,3]
46 |     # cloud = cloud.transform( traj_pose['Ln_T_L0'] )
47 | 
48 |     return cloud, frames
49 | 
50 | if __name__ == "__main__":
51 |     import argparse
52 |     parser = argparse.ArgumentParser()
53 |     parser.add_argument('--h5fn', help='HDF5 File')
54 |     parser.add_argument('--percentage', type=float, help='Lidar index as percentage of sequence')
55 |     parser.add_argument('--verbose', action='store_true', help='Set Verbose Mode')
56 |     parser.add_argument('--confirm_only', action='store_true', help='Set to only confirm')
57 |     parser.add_argument('--confirm_fn', help='Filename of figure to save out', default='tmp.pdf')
58 |     parser.add_argument('--npz_fn', help='Save settings for the npz')
59 |     parser.add_argument('--use_pcd', action='store_true', help="Use PCD from fasterlio")
60 |     parser.add_argument('--tmp_folder', help='Tmp folder within M3ED_Build')
61 |     parser.add_argument('--out_folder', help='Output folder within M3ED_Build')
62 |     args = parser.parse_args()
63 | 
64 |     flio_cloud, flio_traj = get_info_via_fasterlio(args)
65 | 
66 |     o3d.visualization.draw_geometries([flio_cloud]+flio_traj)
67 | 
--------------------------------------------------------------------------------
/build_system/lidar_depth/gt2evo.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ./build_system/preamble.bash
3 | 
4 | output_files=("$GT_POSE_EVO_PATH")
5 | check_files output_files
6 | 
7 | GT_POSE_EVO_PATH_TMP="${GT_POSE_EVO_PATH%.*}_tmp.${GT_POSE_EVO_PATH##*.}"
8 | 
9 | # Run GT pose conversion
10 | echo -e "${BLUE}Starting conversion of GT pose${NC}"
11 | 
12 | python3 build_system/lidar_depth/gt2evo.py \
13 |     --h5_gt_pose "$GT_POSE_PATH" \
14 |     --out_evo_pose "$GT_POSE_EVO_PATH_TMP"
15 | # Check if program exited with error
16 | if [ $? 
-ne 0 ]; then 17 | echo -e "${RED}Error creating media files for $H5_PATH${NC}" 18 | exit 1 19 | fi 20 | mv "$GT_POSE_EVO_PATH_TMP" "$GT_POSE_EVO_PATH" 21 | -------------------------------------------------------------------------------- /build_system/lidar_depth/gt2evo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | from pathlib import Path 4 | 5 | import h5py 6 | import numpy as np 7 | from colorama import Fore, Style 8 | from scipy.spatial.transform import Rotation 9 | 10 | if __name__ == "__main__": 11 | import argparse 12 | 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--h5_gt_pose", help="Filename h5 gt pose file", required=True) 15 | parser.add_argument( 16 | "--out_evo_pose", help="File where the output should be stored", required=True 17 | ) 18 | args = parser.parse_args() 19 | output_file = args.out_evo_pose 20 | 21 | # Check that the input file exists 22 | file_path = Path(args.h5_gt_pose) 23 | if not file_path.is_file(): 24 | sys.exit(f"{args.h5_gt_pose} does not exist") 25 | 26 | # Load GT data 27 | print(Fore.BLUE + "Loading pose file..." + Style.RESET_ALL) 28 | with h5py.File(args.h5_gt_pose, mode="r") as h5f: 29 | traj_len = h5f["ts"].shape[0] 30 | print("Trajectory length %06d" % (traj_len,)) 31 | print("Saving to %s" % output_file) 32 | 33 | print(Fore.BLUE + "Converting to EVO format..." + Style.RESET_ALL) 34 | 35 | # Start conversion 36 | with open(output_file, "w") as f: 37 | for i in range(traj_len): 38 | timestamp = h5f["ts"][i] / 1e6 39 | Ci_T_C0 = h5f["Cn_T_C0"][i] 40 | Ci_R_C0 = Rotation.from_matrix(Ci_T_C0[:3, :3]) 41 | C0_T_Ci = np.linalg.inv(Ci_T_C0) 42 | C0_R_Ci = Rotation.from_matrix(C0_T_Ci[:3, :3]) 43 | 44 | tx, ty, tz = C0_T_Ci[:3, 3] 45 | qx, qy, qz, qw = Ci_R_C0.as_quat() 46 | f.write( 47 | "%f %f %f %f %f %f %f %f\n" 48 | % ( 49 | timestamp, 50 | tx, 51 | ty, 52 | tz, 53 | qx, 54 | qy, 55 | qz, 56 | qw, 57 | ) 58 | ) 59 | -------------------------------------------------------------------------------- /build_system/lidar_depth/gt2media.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./build_system/preamble.bash 3 | 4 | output_files=("$DEPTH_VIDEO_RAW" "$DEPTH_EVENTS_VIDEO_RAW") 5 | check_files output_files 6 | 7 | check_free_space fixed 8 | 9 | DEPTH_VIDEO_RAW_TMP="${DEPTH_VIDEO_RAW%.*}_tmp.${DEPTH_VIDEO_RAW##*.}" 10 | DEPTH_EVENTS_VIDEO_RAW_TMP="${DEPTH_EVENTS_VIDEO_RAW%.*}_tmp.${DEPTH_EVENTS_VIDEO_RAW##*.}" 11 | 12 | # Run media generation 13 | echo -e "${BLUE}Starting depth video generation${NC}" 14 | python3 build_system/lidar_depth/gt2media.py \ 15 | --h5_depth "$GT_DEPTH_PATH" --out "$DEPTH_VIDEO_RAW_TMP" 16 | # Check if program exited with error 17 | if [ $? -ne 0 ]; then 18 | echo -e "${RED}Error creating media files for $H5_PATH${NC}" 19 | exit 1 20 | fi 21 | mv "$DEPTH_VIDEO_RAW_TMP" "$DEPTH_VIDEO_RAW" 22 | echo -e "${GREEN}Depth video raw created${NC}" 23 | 24 | # Compress video 25 | echo -e "${BLUE}Compressing${NC}" 26 | compress_with_ffmpeg "$DEPTH_VIDEO_RAW" 27 | echo -e "${GREEN}Compression finished${NC}" 28 | 29 | echo -e "${BLUE}Starting depth events video generation${NC}" 30 | python3 build_system/lidar_depth/gt2media.py \ 31 | --h5_depth "$GT_DEPTH_PATH" \ 32 | --h5_events "$H5_PATH" \ 33 | --out "$DEPTH_EVENTS_VIDEO_RAW_TMP" 34 | # Check if program exited with error 35 | if [ $? 
-------------------------------------------------------------------------------- /build_system/lidar_depth/gt2media.bash: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ./build_system/preamble.bash
3 | 
4 | output_files=("$DEPTH_VIDEO_RAW" "$DEPTH_EVENTS_VIDEO_RAW")
5 | check_files output_files
6 | 
7 | check_free_space fixed
8 | 
9 | DEPTH_VIDEO_RAW_TMP="${DEPTH_VIDEO_RAW%.*}_tmp.${DEPTH_VIDEO_RAW##*.}"
10 | DEPTH_EVENTS_VIDEO_RAW_TMP="${DEPTH_EVENTS_VIDEO_RAW%.*}_tmp.${DEPTH_EVENTS_VIDEO_RAW##*.}"
11 | 
12 | # Run media generation
13 | echo -e "${BLUE}Starting depth video generation${NC}"
14 | python3 build_system/lidar_depth/gt2media.py \
15 |     --h5_depth "$GT_DEPTH_PATH" --out "$DEPTH_VIDEO_RAW_TMP"
16 | # Check if program exited with error
17 | if [ $? -ne 0 ]; then
18 |     echo -e "${RED}Error creating media files for $H5_PATH${NC}"
19 |     exit 1
20 | fi
21 | mv "$DEPTH_VIDEO_RAW_TMP" "$DEPTH_VIDEO_RAW"
22 | echo -e "${GREEN}Depth video raw created${NC}"
23 | 
24 | # Compress video
25 | echo -e "${BLUE}Compressing${NC}"
26 | compress_with_ffmpeg "$DEPTH_VIDEO_RAW"
27 | echo -e "${GREEN}Compression finished${NC}"
28 | 
29 | echo -e "${BLUE}Starting depth events video generation${NC}"
30 | python3 build_system/lidar_depth/gt2media.py \
31 |     --h5_depth "$GT_DEPTH_PATH" \
32 |     --h5_events "$H5_PATH" \
33 |     --out "$DEPTH_EVENTS_VIDEO_RAW_TMP"
34 | # Check if program exited with error
35 | if [ $? -ne 0 ]; then
36 |     echo -e "${RED}Error creating media files for $H5_PATH${NC}"
37 |     exit 1
38 | fi
39 | mv "$DEPTH_EVENTS_VIDEO_RAW_TMP" "$DEPTH_EVENTS_VIDEO_RAW"
40 | echo -e "${GREEN}Depth events video raw created${NC}"
41 | 
42 | # Compress video
43 | echo -e "${BLUE}Compressing${NC}"
44 | compress_with_ffmpeg "$DEPTH_EVENTS_VIDEO_RAW"
45 | echo -e "${GREEN}Compression finished${NC}"
46 | 
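# Note: as in the other build scripts, each video is rendered to a *_tmp path
# and only mv-ed into place on success, so a rerun's check_files never sees a
# half-written output. Illustrative pattern (OUT is a hypothetical variable):
#   TMP="${OUT%.*}_tmp.${OUT##*.}"
#   produce_output "$TMP" || exit 1
#   mv "$TMP" "$OUT"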
-------------------------------------------------------------------------------- /build_system/lidar_depth/gt2media.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import numpy as np
5 | import cv2
6 | import h5py
7 | from matplotlib import cm
8 | from colorama import Fore, Style
9 | 
10 | MAX_NUM_EVENTS = 1000000
11 | 
12 | if __name__ == "__main__":
13 |     import argparse
14 |     ap = argparse.ArgumentParser()
15 |     ap.add_argument("--h5_depth", help="Input containing the depth", required=True)
16 |     ap.add_argument("--out", help="Output video file", required=True)
17 |     ap.add_argument("--h5_events", help="Input containing the events")
18 |     # Add a debug flag argument
19 |     ap.add_argument("--debug", help="Debug flag", action="store_true")
20 |     ap = ap.parse_args()
21 | 
22 |     # Check that the input depth file exists
23 |     if not os.path.exists(ap.h5_depth):
24 |         sys.exit(f"Input file does not exist: {ap.h5_depth}")
25 | 
26 |     # Check that the input for events exists
27 |     if ap.h5_events is not None:
28 |         if not os.path.exists(ap.h5_events):
29 |             sys.exit("Input file does not exist: {}".format(ap.h5_events))
30 |         else:
31 |             use_events = True
32 |             print(Fore.YELLOW + "Creating video with events" + Style.RESET_ALL)
33 |     else:
34 |         use_events = False
35 |         print(Fore.YELLOW + "Creating video without events" + Style.RESET_ALL)
36 | 
37 |     print(Fore.BLUE + "Loading depth file..." + Style.RESET_ALL)
38 |     with h5py.File(ap.h5_depth, "r") as f:
39 |         imgs = f["/depth/prophesee/left"][:]
40 |         ts_map = f["/ts_map_prophesee_left"][:]
41 |     num_frames, height, width = imgs.shape
42 | 
43 |     # replace all 0 with np.inf
44 |     imgs[imgs == 0] = np.inf
45 | 
46 |     min_depth = np.min(imgs)
47 |     # Get the max depth as the 99.7 percentile of the finite values
48 |     max_depth = np.percentile(imgs[imgs != np.inf], 99.7)
49 | 
50 |     # Initialize the video writer
51 |     fourcc = cv2.VideoWriter_fourcc(*"FFV1")
52 |     fps = 10
53 |     video_writer = cv2.VideoWriter(ap.out, fourcc,
54 |                                    fps, (width, height),
55 |                                    isColor=True)
56 | 
57 |     if use_events:
58 |         events_file = h5py.File(ap.h5_events, "r")
59 |         g = events_file["/prophesee/left"]
60 |         cmap = cm.get_cmap("summer")
61 |     else:
62 |         cmap = cm.get_cmap("jet")
63 | 
64 |     # Process each frame and write to the video
65 |     for i in range(num_frames-1):
66 |         # Print without newline
67 |         if ap.debug:
68 |             print("\rProcessing frame {}/{}".format(i, num_frames), end="")
69 | 
70 |         # Read the frame from the dataset
71 |         depth_frame = imgs[i, :, :]
72 |         depth_frame_clipped = np.clip(depth_frame, min_depth, max_depth)
73 |         depth_frame_clipped[depth_frame == np.inf] = np.inf
74 | 
75 |         # Map the depth values to a colormap
76 |         depth_frame_normalized = (depth_frame_clipped - min_depth) / (max_depth - min_depth)
77 |         depth_frame_colored = (cmap(depth_frame_normalized)[:, :, :3] * 255).astype(np.uint8)
78 | 
79 |         # Map all the inf values to black
80 |         depth_frame_colored[depth_frame == np.inf] = 0
81 | 
82 |         if use_events:
83 |             ev_x = g["x"][ts_map[i]:ts_map[i+1]]
84 |             ev_y = g["y"][ts_map[i]:ts_map[i+1]]
85 |             ev_p = g["p"][ts_map[i]:ts_map[i+1]]
86 |             ev_t = g["t"][ts_map[i]:ts_map[i+1]]
87 | 
88 |             # Keep only the events in a 5 ms window at the start of the frame
89 |             min_t = np.min(ev_t)
90 |             max_t = min_t + 5e3
91 |             idx = np.logical_and(ev_t > min_t, ev_t < max_t)
92 | 
93 |             # Create indices to sample the event stream
94 |             # idx = np.random.randint(0, len(ev_x),
95 |             #                         np.min((MAX_NUM_EVENTS, len(ev_x))))
96 | 
97 |             ev_x = ev_x[idx]
98 |             ev_y = ev_y[idx]
99 |             ev_p = ev_p[idx]
100 |             # Color the events by polarity
101 |             ev_p = ev_p*255
102 |             color = np.array([ev_p, np.zeros_like(ev_p), 255-ev_p])
103 |             color = color // 2
104 |             depth_frame_colored[ev_y, ev_x, :] = color.T
105 | 
106 |         # Write the frame to the video
107 |         video_writer.write(depth_frame_colored)
108 |     if ap.debug:
109 |         print("")
110 |     # Release the video writer and close the HDF5 file
111 |     video_writer.release()
112 |     if use_events:
113 |         events_file.close()
114 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/gt2verify.bash: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ./build_system/preamble.bash
3 | 
4 | # output_files=("$DEPTH_VIDEO_RAW" "$DEPTH_EVENTS_VIDEO_RAW")
5 | # check_files output_files
6 | # if CHECK_POSE_RETURN is 0, skip this file
7 | if [ $CHECK_POSE_RETURN -eq 0 ]; then
8 |     echo -e "${BLUE}Skipping GT Check for $H5_PATH${NC}"
9 |     exit 0
10 | fi
11 | 
12 | # Run GT verification
13 | echo -e "${BLUE}Starting GT Check${NC}"
14 | python3 build_system/lidar_depth/gt2verify.py \
15 |     --h5_depth "$GT_POSE_PATH" \
16 |     --absolute_error "$ABSOLUTE_POSITION_ERROR" \
17 |     --pose_stats "$TMP_PATH/pose_stats.txt"
18 | # Check if program exited with error
19 | if [ $? -ne 0 ]; then
20 |     echo -e "${RED}Error verifying GT pose for $H5_PATH${NC}"
21 |     exit 1
22 | fi
23 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/gt2verify.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import numpy as np
5 | import h5py
6 | from colorama import Fore, Style
7 | 
8 | if __name__ == "__main__":
9 |     import argparse
10 |     ap = argparse.ArgumentParser()
11 |     ap.add_argument("--h5_depth", help="Input h5 file with the GT poses (Cn_T_C0, Ln_T_L0)",
12 |                     required=True)
13 |     ap.add_argument("--relative_error", type=float, default=0.002,
14 |                     help="Relative error allowed - cumulative distance")
15 |     ap.add_argument("--absolute_error", type=float, required=True,
16 |                     help="Absolute error allowed")
17 |     ap.add_argument("--pose_stats", type=str,
18 |                     help="Filename where to save debug information")
19 |     # Add a debug flag argument
20 |     ap.add_argument("--debug", help="Debug flag", action="store_true")
21 |     ap = ap.parse_args()
22 | 
23 |     # Check that the input pose file exists
24 |     if not os.path.exists(ap.h5_depth):
25 |         sys.exit(f"Input file does not exist: {ap.h5_depth}")
26 | 
27 |     print(Fore.BLUE + "Loading GT pose file..." + Style.RESET_ALL)
28 |     with h5py.File(ap.h5_depth, "r") as f:
29 |         Cn_T_C0 = f["/Cn_T_C0"][:]
30 |         C0_T_Cn = np.linalg.inv( Cn_T_C0 )
31 |         Ln_T_L0 = f["/Ln_T_L0"][:]
32 |         L0_T_Ln = np.linalg.inv( Ln_T_L0 )
33 | 
34 |     if ap.debug:
35 |         print("Start")
36 |         print(C0_T_Cn[0])
37 |         print("Stop")
38 |         print(C0_T_Cn[-1])
39 | 
40 |     t0_T_tn = C0_T_Cn[:,:3,3]
41 | 
42 |     frame_translation = np.diff(t0_T_tn, axis=0)
43 |     frame_dist = np.sqrt(np.sum(frame_translation**2, axis=1))
44 |     cumulative_distance = np.sum(frame_dist)
45 |     error = np.sum(np.sqrt(np.sum(np.diff(t0_T_tn[[0,-1],:], axis=0)**2, axis=1)))
46 | 
47 |     print(Fore.YELLOW + f"Cumulative distance: {cumulative_distance}" + Style.RESET_ALL)
48 |     print(Fore.YELLOW + f"Error: {error}" + Style.RESET_ALL)
49 | 
50 |     error_threshold = cumulative_distance * ap.relative_error + ap.absolute_error
51 |     print(Fore.YELLOW + f"Error threshold: {error_threshold}" + Style.RESET_ALL)
52 |     with open(ap.pose_stats, "w") as f:
53 |         f.write(f"Input file: {ap.h5_depth}\n")
54 |         f.write(f"Cumulative distance: {cumulative_distance}\n")
55 |         f.write(f"Error threshold: {error_threshold}\n")
56 |         f.write(f"Error: {error}\n")
57 |         f.write(f"Relative error (parameter): {ap.relative_error}\n")
58 |         f.write(f"Absolute error (parameter): {ap.absolute_error}\n")
59 | 
60 |     if error < error_threshold:
61 |         print(Fore.GREEN + "PASSED" + Style.RESET_ALL)
62 |     else:
63 |         print(Fore.RED + "FAILED" + Style.RESET_ALL)
64 |         sys.exit(1)
65 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/lidar_calib.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | """ GUI for hand calibrating the LiDAR with the different sequences """
4 | 
5 | import h5py
6 | import pdb
7 | import os
8 | import numpy as np
9 | import cv2
10 | from scipy.spatial.transform import Rotation
11 | from scipy.spatial.transform import Slerp
12 | 
13 | from ouster.sdk.examples.open3d import viewer_3d
14 | from ouster.client import LidarPacket, SensorInfo, Scans, Packets, ChanField, XYZLut, _utils
15 | import open3d as o3d
16 | 
17 | from util import
kalibr_to_opencv, get_cloud, load_clouds, load_trajectory, load_imu_based_calib 18 | 19 | class ManualCalibrator: 20 | def __init__(self, cloud_image_list, camera_calib, R_imu, fn=None): 21 | # Prepare point cloud for better plotting 22 | bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(1,-30,-30), max_bound=(150,30,30)) 23 | 24 | self.clouds = [c[0].crop(bbox) for c in cloud_image_list] 25 | self.cloud_points = [np.array(c.points) for c in self.clouds] 26 | self.cloud_colors = [np.array(c.colors) for c in self.clouds] 27 | 28 | self.images = [c[1] for c in cloud_image_list] 29 | 30 | self.camera_calib = camera_calib 31 | self.point_scale = 1 32 | 33 | self.R_ideal = np.array([[ 0, 0, 1], 34 | [-1, 0, 0], 35 | [ 0,-1, 0]]) 36 | 37 | self.fn = fn 38 | if os.path.exists(self.fn): 39 | lidar_calib_info = np.load(self.fn) 40 | if not 'R_imu' in lidar_calib_info or not 'origin' in lidar_calib_info: 41 | self.rotvec = np.zeros(3) 42 | self.t = lidar_calib_info['t_c_l'] 43 | self.R_imu = R_imu 44 | else: 45 | if lidar_calib_info['origin'] == self.fn: 46 | self.R_imu = lidar_calib_info['R_imu'] 47 | self.rotvec = lidar_calib_info['r_c_l'] 48 | self.t = lidar_calib_info['t_c_l'] 49 | else: 50 | self.R_imu = R_imu 51 | self.rotvec = np.zeros(3) 52 | self.t = lidar_calib_info['t_c_l'] 53 | else: 54 | self.rotvec = np.zeros(3) 55 | self.t = np.zeros(3) 56 | self.R_imu = R_imu 57 | 58 | print("Initializing") 59 | print(self.rotvec) 60 | print(self.t) 61 | print(self.R_imu) 62 | 63 | self.masks = [None] * len(self.clouds) 64 | 65 | def show_cloud(self): 66 | o3d.visualization.draw_geometries(self.clouds) 67 | 68 | def project_points(self, cloud_points, mask, cloud_colors=None, 69 | confirm_only=False): 70 | T_c_l = np.eye(4) 71 | T_c_l[:3,:3] = (Rotation.from_matrix(self.R_imu) * Rotation.from_rotvec(self.rotvec)).as_matrix().T 72 | T_c_l[:3,3] = self.t 73 | 74 | if not confirm_only: 75 | np.savez(self.fn, T_c_l=T_c_l, r_c_l=self.rotvec, t_c_l=self.t, R_imu=self.R_imu, origin=self.fn) 76 | 77 | transformed_cloud = cloud_points @ (Rotation.from_matrix(self.R_imu) * Rotation.from_rotvec(self.rotvec)).as_matrix() + self.t 78 | 79 | rvecs = cv2.Rodrigues(np.eye(3))[0] 80 | tvecs = np.zeros((3,1)) 81 | 82 | imgpts, jac = cv2.projectPoints( 83 | transformed_cloud.reshape(-1,1,3), 84 | rvecs, 85 | tvecs, 86 | self.camera_calib['camera_matrix'], 87 | self.camera_calib['distortion_coefficients']) 88 | 89 | if mask is None: 90 | mask = (imgpts[:,:,0] > 0) * (imgpts[:,:,0]<1280) # * (imgpts[:,:,1] > 0) * (imgpts[:,:,1]<720) 91 | mask = mask.squeeze() 92 | 93 | imgpts = imgpts.squeeze()[mask] 94 | depth = transformed_cloud[mask][:,2] 95 | colors = np.log(depth) # self.cloud_colors[mask] 96 | colors = colors / colors.max() 97 | 98 | return imgpts, colors, mask 99 | 100 | def update_transform(self, val): 101 | self.t = np.array([self.slider_l_x.val, self.slider_l_y.val, self.slider_l_z.val]) 102 | self.rotvec = np.array([self.slider_r_x.val, self.slider_r_y.val, self.slider_r_z.val]) 103 | 104 | self.point_scale = self.slider_scale.val 105 | 106 | for i in range(len(self.clouds)): 107 | imgpts, colors, mask = self.project_points(self.cloud_points[i], self.masks[i]) 108 | 109 | self.scatter_plts[i].set_offsets( imgpts ) 110 | self.scatter_plts[i].set_sizes( np.ones((imgpts.shape[0],)) * self.point_scale) 111 | 112 | self.fig.canvas.draw_idle() 113 | 114 | def plot(self, confirm_only=False, figfn='tmp.pdf'): 115 | if confirm_only: 116 | import matplotlib as mpl 117 | mpl.use('Agg') 118 | import matplotlib.pyplot 
as plt
119 |         imgpts, colors,_ = self.project_points(self.cloud_points[0], None,
120 |                                                 confirm_only=confirm_only)
121 |         self.fig, axes = plt.subplots(1,1)
122 |         axes.imshow(self.images[0].squeeze(), cmap='gray')
123 |         axes.scatter(x=imgpts[:,0], y=imgpts[:,1], c=colors, s=0.5)
124 |         self.fig.savefig(figfn, dpi=400)
125 |     else:
126 |         import matplotlib.pyplot as plt
127 | 
128 |         self.fig, axes = plt.subplots(8,len(self.clouds),height_ratios=[25,1,1,1,1,1,1,1],width_ratios=[1]*len(self.clouds), squeeze=False)
129 | 
130 |         self.img_plts = []
131 |         self.scatter_plts = []
132 |         for i in range(len(self.clouds)):
133 |             imgpts, colors, mask = self.project_points(self.cloud_points[i], self.masks[i])
134 |             self.masks[i] = mask
135 |             self.img_plts.append(axes[0,i].imshow(self.images[i], cmap='gray'))
136 |             self.scatter_plts.append(axes[0,i].scatter(x=imgpts[:,0], y=imgpts[:,1], c=colors, s=self.point_scale))
137 |             axes[0,i].set_xlim([-100,1380])
138 |             axes[0,i].set_ylim([820,-100])
139 | 
140 |         center_axis_id = len(self.clouds) // 2
141 |         from matplotlib.widgets import Slider, Button
142 |         self.slider_l_x = Slider(axes[1, center_axis_id], "X", -0.40, 0.40, valinit=self.t[0])
143 |         self.slider_l_y = Slider(axes[2, center_axis_id], "Y", -0.40, 0.40, valinit=self.t[1])
144 |         self.slider_l_z = Slider(axes[3, center_axis_id], "Z", -0.40, 0.40, valinit=self.t[2])
145 | 
146 |         self.slider_r_x = Slider(axes[4, center_axis_id], "Rx", -0.05, 0.05, valinit=self.rotvec[0])
147 |         self.slider_r_y = Slider(axes[5, center_axis_id], "Ry", -0.15, 0.07, valinit=self.rotvec[1])
148 |         self.slider_r_z = Slider(axes[6, center_axis_id], "Rz", -0.15, 0.15, valinit=self.rotvec[2])
149 | 
150 |         self.slider_scale = Slider(axes[7, center_axis_id], "S", 0.5, 20, valinit=2)
151 | 
152 |         self.slider_l_x.on_changed(self.update_transform)
153 |         self.slider_l_y.on_changed(self.update_transform)
154 |         self.slider_l_z.on_changed(self.update_transform)
155 |         self.slider_r_x.on_changed(self.update_transform)
156 |         self.slider_r_y.on_changed(self.update_transform)
157 |         self.slider_r_z.on_changed(self.update_transform)
158 |         self.slider_scale.on_changed(self.update_transform)
159 | 
160 |         plt.show()
161 | 
162 | def get_images(h5f, idx):
163 |     return {
164 |         '/ovc/left': h5f['/ovc/left/data'][idx,...],
165 |         '/ovc/right': h5f['/ovc/right/data'][idx,...],
166 |         '/ovc/ts': h5f['/ovc/ts'][idx,...],
167 |     }
168 | 
169 | def get_info_via_scans(hdf5_file, args, ts):
170 |     lidar_start_ts = hdf5_file['/ouster/ts_start']
171 |     lidar_end_ts = hdf5_file['/ouster/ts_end']
172 | 
173 |     lidar_mid_times = ( lidar_start_ts[...] + lidar_end_ts[...]
) / 2 174 | lidar_idx = np.argmin( np.abs(lidar_mid_times - ts) ) 175 | lidar_mid_time = lidar_mid_times[lidar_idx] 176 | 177 | camera_ts = hdf5_file['/ovc/ts'] 178 | 179 | cam_idx = np.argmin( np.abs(lidar_mid_time - camera_ts) ) 180 | 181 | cloud = get_cloud(hdf5_file, lidar_idx) 182 | images = get_images(hdf5_file, cam_idx) 183 | return cloud, images 184 | 185 | def get_info_via_fasterlio(hdf5_file, args, percentage): 186 | exp_name = os.path.basename(args.tmp_folder) 187 | 188 | import yaml 189 | with open(args.tmp_folder+"/"+exp_name+"_time_corrections.yaml", 'r') as f: 190 | timesync = yaml.safe_load(f) 191 | 192 | traj_file = args.tmp_folder+"/"+exp_name+".traj" 193 | traj = load_trajectory(traj_file, timesync, hdf5_file) 194 | orig_times = np.array([t['orig_ts'] for t in traj]) 195 | orig_corrected_times = np.array([t['orig_corrected_ts'] for t in traj]) 196 | 197 | lidar_times = np.array([t['timestamp'] for t in traj]) 198 | # nominal_times = (np.arange(lidar_times.shape[0])+1)* 100000 199 | # offset = (lidar_times - nominal_times)[0] 200 | # offset = (offset/1000).round() * 1000 201 | # lidar_times = nominal_times + offset 202 | 203 | ovc_times = hdf5_file['/ovc/ts'][...] 204 | 205 | from scipy.spatial.distance import cdist 206 | all_pair_times = cdist(lidar_times[:,None], ovc_times[:,None]) 207 | lidar_to_camera = np.argmin(np.abs(all_pair_times),axis=1) 208 | lidar_to_camera = np.searchsorted(ovc_times, lidar_times ) 209 | lidar_to_camera[lidar_to_camera == 0] = -1 210 | lidar_to_camera[lidar_to_camera >= ovc_times.shape[0]] = -1 211 | 212 | # lidar_times = lidar_times[lidar_to_camera < ovc_times.shape[0]] 213 | # lidar_to_camera = lidar_to_camera[lidar_to_camera < ovc_times.shape[0]] 214 | 215 | closest_ovc_times = ovc_times[lidar_to_camera] 216 | closest_time_diffs = closest_ovc_times - lidar_times 217 | 218 | lidar_idx = int(percentage * lidar_to_camera.shape[0]) 219 | cam_idx = lidar_to_camera[lidar_idx] 220 | lidar_idx += 0 221 | 222 | if np.abs(lidar_times[lidar_idx] - ovc_times[cam_idx]) > 20000: 223 | print("Foo") 224 | lidar_idx += 1 225 | cam_idx = lidar_to_camera[lidar_idx] 226 | 227 | traj_entry = traj[lidar_idx] 228 | lidar_time = traj_entry['timestamp'] 229 | 230 | Lnm1_T_L0 = traj[lidar_idx-1]['Ln_T_L0'] # time = -100000 231 | Ln_T_L0 = traj_entry['Ln_T_L0'] # time = 0 232 | Lnp1_T_L0 = traj[lidar_idx+1]['Ln_T_L0'] # time = 100000 233 | 234 | R_T_R0 = Rotation.from_matrix([ 235 | Lnm1_T_L0[:3,:3], 236 | Ln_T_L0[:3,:3], 237 | Lnp1_T_L0[:3,:3], 238 | ]) 239 | slerp = Slerp([-100000,0,100000], R_T_R0) # setup slerp interpolation 240 | 241 | rel_cam_time = (ovc_times[cam_idx] - lidar_time) 242 | 243 | Lc_T_L0 = np.eye(4) 244 | Lc_T_L0[:3,:3] = slerp(rel_cam_time).as_matrix() # interpolate to current time 245 | 246 | if rel_cam_time < 0: 247 | a, b, c = (-rel_cam_time/100000), ((100000+rel_cam_time)/100000), 0.0 248 | elif rel_cam_time > 0: 249 | a, b, c = 0.0, ((100000-rel_cam_time)/100000), ((rel_cam_time)/100000) 250 | else: 251 | a, b, c = 0.0, 1.0, 0.0 252 | 253 | Lc_T_L0[:3,3:] = a * Lnm1_T_L0[:3,3:] + b * Ln_T_L0[:3,3:] + c * Lnp1_T_L0[:3,3:] 254 | 255 | cloud = o3d.geometry.PointCloud() 256 | points = [load_clouds(args.out_folder+"/local_scans", idx) for idx in range(traj_entry['idx'], traj_entry['idx']+4)] 257 | points = np.concatenate(points) 258 | 259 | points = (points @ Lc_T_L0[:3,:3].T) + Lc_T_L0[:3,3] 260 | 261 | cloud.points = o3d.utility.Vector3dVector(points) 262 | cloud.paint_uniform_color([1, 0.706, 0]) 263 | 264 | mesh, pt_map = 
cloud.hidden_point_removal(np.zeros((3,)), 10000. ) # camera_location['Ln_T_L0'][:3,3], 1000000.0 )
265 |     cloud = cloud.select_by_index(pt_map)
266 | 
267 |     images = get_images(hdf5_file, cam_idx)
268 | 
269 |     print("cam_idx:", cam_idx, ", lidar_idx:", lidar_idx)
270 |     print("  dt:", ovc_times[cam_idx] - lidar_time)
271 | 
272 |     return cloud, images['/ovc/left']
273 | 
274 | if __name__ == "__main__":
275 |     import argparse
276 |     parser = argparse.ArgumentParser()
277 |     parser.add_argument('--h5fn', help='HDF5 File')
278 |     parser.add_argument('--percentage', nargs='+',type=float, help='Lidar index as percentage of sequence')
279 |     parser.add_argument('--verbose', action='store_true', help='Set Verbose Mode')
280 |     parser.add_argument('--confirm_only', action='store_true', help='Set to only confirm')
281 |     parser.add_argument('--confirm_fn', help='Filename of figure to save out', default='tmp.pdf')
282 |     parser.add_argument('--npz_fn', help='Save settings for the npz')
283 |     parser.add_argument('--use_pcd', action='store_true', help="Use PCD from fasterlio")
284 |     parser.add_argument('--tmp_folder', help='Tmp folder within M3ED_Build')
285 |     parser.add_argument('--out_folder', help='Output folder within M3ED_Build')
286 |     args = parser.parse_args()
287 | 
288 |     hdf5_file = h5py.File(args.h5fn, 'r')
289 | 
290 |     lidar_end_ts = hdf5_file['/ouster/ts_end']
291 | 
292 |     clouds_and_images = [get_info_via_fasterlio(hdf5_file, args, p) for p in args.percentage]
293 | 
294 |     left_h5_calib = hdf5_file['/ovc/left/calib']
295 |     K = np.eye(3)
296 |     np.fill_diagonal(K[:2,:2], left_h5_calib['intrinsics'][:2])
297 |     K[:2,2] = left_h5_calib['intrinsics'][2:]
298 |     D = np.zeros(5)
299 |     D[:4] = left_h5_calib['distortion_coeffs']
300 |     left_cam_calib = {
301 |         "camera_matrix": K,
302 |         "distortion_coefficients": D,
303 |     }
304 | 
305 |     R_imu = load_imu_based_calib(hdf5_file)
306 | 
307 |     mc = ManualCalibrator(clouds_and_images, left_cam_calib, R_imu, args.npz_fn)
308 |     mc.plot(args.confirm_only, args.confirm_fn)
309 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/long_range_ouster64.yaml: --------------------------------------------------------------------------------
1 | common:
2 |     lid_topic: "/os_node/points"
3 |     imu_topic: "/os_node/imu"
4 |     time_sync_en: false # ONLY turn on when external time synchronization is really not possible
5 | 
6 | preprocess:
7 |     lidar_type: 3 # 1 for Livox serials LiDAR, 2 for Velodyne LiDAR, 3 for ouster LiDAR,
8 |     scan_line: 64
9 |     blind: 4
10 |     time_scale: 1e-3
11 | 
12 | mapping:
13 |     acc_cov: 0.1
14 |     gyr_cov: 0.1
15 |     b_acc_cov: 0.0001
16 |     b_gyr_cov: 0.0001
17 |     fov_degree: 180
18 |     det_range: 150.0
19 |     extrinsic_est_en: false # true: enable the online estimation of IMU-LiDAR extrinsic
20 |     extrinsic_T: [ -0.00625, 0.01175, -0.007645 ]
21 |     extrinsic_R: [1, 0, 0,
22 |                   0, 1, 0,
23 |                   0, 0, 1]
24 | 
25 | publish:
26 |     path_publish_en: false
27 |     scan_publish_en: true # false: close all the point cloud output
28 |     scan_effect_pub_en: true # true: publish the point cloud of effective points
29 |     dense_publish_en: false # false: reduce the number of points in a global-frame point cloud scan
30 |     scan_bodyframe_pub_en: true # true: output the point cloud scans in IMU-body-frame
31 | 
32 | path_save_en: true
33 | 
34 | pcd_save:
35 |     pcd_save_en: true
36 |     interval: 1 # how many LiDAR frames saved in each pcd file;
37 |                 # -1 : all frames will be saved in ONE pcd file, which may lead to a memory crash when there are too many frames.
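# Note: the keys below are the main difference from short_range_ouster64.yaml.
# This long-range profile uses a larger blind radius (4 m vs 1 m), coarser
# voxel filters (0.20 m vs 0.10 m), and a much larger cube_side_length
# (5000 vs 500), presumably trading map density for coverage on long outdoor
# driving sequences.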
38 | feature_extract_enable: false
39 | point_filter_num: 3
40 | max_iteration: 4
41 | filter_size_surf: 0.20
42 | filter_size_map: 0.20 # currently unused; the code sets it to 0, i.e. all points of the downsampled scan are added to the map
43 | cube_side_length: 5000
44 | 
45 | ivox_grid_resolution: 0.20 # default=0.2
46 | ivox_nearby_type: 26 # 6, 18, 26
47 | esti_plane_threshold: 0.1 # default=0.1
48 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/ouster_bag_convert/CMakeLists.txt: --------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(ouster_bag_convert)
3 | 
4 | find_package(Eigen3 REQUIRED)
5 | find_package(PCL REQUIRED COMPONENTS common)
6 | find_package(tf2_eigen REQUIRED)
7 | find_package(CURL REQUIRED)
8 | find_package(Boost REQUIRED)
9 | 
10 | ## Compile as C++11, supported in ROS Kinetic and newer
11 | # add_compile_options(-std=c++11)
12 | 
13 | ## Find catkin macros and libraries
14 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
15 | ## is used, also find other catkin packages
16 | find_package(catkin REQUIRED COMPONENTS
17 |     geometry_msgs
18 |     ouster_ros
19 |     roscpp
20 |     sensor_msgs
21 |     std_msgs
22 |     pcl_conversions
23 |     tf2
24 |     tf2_ros
25 |     rosbag
26 | )
27 | 
28 | ## System dependencies are found with CMake's conventions
29 | # find_package(Boost REQUIRED COMPONENTS system)
30 | 
31 | 
32 | ## Uncomment this if the package has a setup.py. This macro ensures
33 | ## modules and global scripts declared therein get installed
34 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
35 | # catkin_python_setup()
36 | 
37 | ################################################
38 | ## Declare ROS messages, services and actions ##
39 | ################################################
40 | 
41 | ## To declare and build messages, services or actions from within this
42 | ## package, follow these steps:
43 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
44 | ##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
45 | ## * In the file package.xml:
46 | ##   * add a build_depend tag for "message_generation"
47 | ##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
48 | ##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
49 | ##     but can be declared for certainty nonetheless:
50 | ##     * add a exec_depend tag for "message_runtime"
51 | ## * In this file (CMakeLists.txt):
52 | ##   * add "message_generation" and every package in MSG_DEP_SET to
53 | ##     find_package(catkin REQUIRED COMPONENTS ...)
54 | ##   * add "message_runtime" and every package in MSG_DEP_SET to
55 | ##     catkin_package(CATKIN_DEPENDS ...)
56 | ##   * uncomment the add_*_files sections below as needed
57 | ##     and list every .msg/.srv/.action file to be processed
58 | ##   * uncomment the generate_messages entry below
59 | ##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
60 | 61 | ## Generate messages in the 'msg' folder 62 | # add_message_files( 63 | # FILES 64 | # Message1.msg 65 | # Message2.msg 66 | # ) 67 | 68 | ## Generate services in the 'srv' folder 69 | # add_service_files( 70 | # FILES 71 | # Service1.srv 72 | # Service2.srv 73 | # ) 74 | 75 | ## Generate actions in the 'action' folder 76 | # add_action_files( 77 | # FILES 78 | # Action1.action 79 | # Action2.action 80 | # ) 81 | 82 | ## Generate added messages and services with any dependencies listed here 83 | # generate_messages( 84 | # DEPENDENCIES 85 | # geometry_msgs# sensor_msgs# std_msgs 86 | # ) 87 | 88 | ################################################ 89 | ## Declare ROS dynamic reconfigure parameters ## 90 | ################################################ 91 | 92 | ## To declare and build dynamic reconfigure parameters within this 93 | ## package, follow these steps: 94 | ## * In the file package.xml: 95 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 96 | ## * In this file (CMakeLists.txt): 97 | ## * add "dynamic_reconfigure" to 98 | ## find_package(catkin REQUIRED COMPONENTS ...) 99 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 100 | ## and list every .cfg file to be processed 101 | 102 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 103 | # generate_dynamic_reconfigure_options( 104 | # cfg/DynReconf1.cfg 105 | # cfg/DynReconf2.cfg 106 | # ) 107 | 108 | ################################### 109 | ## catkin specific configuration ## 110 | ################################### 111 | ## The catkin_package macro generates cmake config files for your package 112 | ## Declare things to be passed to dependent projects 113 | ## INCLUDE_DIRS: uncomment this if your package contains header files 114 | ## LIBRARIES: libraries you create in this project that dependent projects also need 115 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 116 | ## DEPENDS: system dependencies of this project that dependent projects also need 117 | catkin_package( 118 | # INCLUDE_DIRS include 119 | # LIBRARIES ouster_bag_convert 120 | # CATKIN_DEPENDS geometry_msgs ouster_ros roscpp sensor_msgs std_msgs 121 | # DEPENDS system_lib 122 | ) 123 | 124 | ########### 125 | ## Build ## 126 | ########### 127 | 128 | ## Specify additional locations of header files 129 | ## Your package locations should be listed before other locations 130 | include_directories( 131 | # include 132 | ${catkin_INCLUDE_DIRS} 133 | ) 134 | 135 | ## Declare a C++ library 136 | # add_library(${PROJECT_NAME} 137 | # src/${PROJECT_NAME}/ouster_bag_convert.cpp 138 | # ) 139 | 140 | ## Add cmake target dependencies of the library 141 | ## as an example, code may need to be generated before libraries 142 | ## either from message generation or dynamic reconfigure 143 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 144 | 145 | ## Declare a C++ executable 146 | ## With catkin_make all packages are built within a single CMake context 147 | ## The recommended prefix ensures that target names across packages don't collide 148 | add_executable(ouster_bag_converter src/bag_rewriter.cpp) 149 | 150 | ## Rename C++ executable without prefix 151 | ## The above recommended prefix causes long target names, the following renames the 152 | ## target back to the shorter version for ease of user use 153 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 154 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 155 | 156 | ## Add cmake target dependencies of the executable 157 | ## same as for the library above 158 | # add_dependencies(ouster_bag_converter ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 159 | 160 | ## Specify libraries to link a library or executable target against 161 | target_link_libraries(ouster_bag_converter 162 | ${catkin_LIBRARIES} 163 | ) 164 | 165 | ############# 166 | ## Install ## 167 | ############# 168 | 169 | # all install targets should use catkin DESTINATION variables 170 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 171 | 172 | ## Mark executable scripts (Python etc.) for installation 173 | ## in contrast to setup.py, you can choose the destination 174 | # catkin_install_python(PROGRAMS 175 | # scripts/my_python_script 176 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 177 | # ) 178 | 179 | ## Mark executables for installation 180 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 181 | # install(TARGETS ${PROJECT_NAME}_node 182 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 183 | # ) 184 | 185 | ## Mark libraries for installation 186 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 187 | # install(TARGETS ${PROJECT_NAME} 188 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 189 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 190 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 191 | # ) 192 | 193 | ## Mark cpp header files for installation 194 | # install(DIRECTORY include/${PROJECT_NAME}/ 195 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 196 | # FILES_MATCHING PATTERN "*.h" 197 | # PATTERN ".svn" EXCLUDE 198 | # ) 199 | 200 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
201 | # install(FILES
202 | #     # myfile1
203 | #     # myfile2
204 | #     DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
205 | # )
206 | 
207 | #############
208 | ## Testing ##
209 | #############
210 | 
211 | ## Add gtest based cpp test target and link libraries
212 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_ouster_bag_convert.cpp)
213 | # if(TARGET ${PROJECT_NAME}-test)
214 | #     target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
215 | # endif()
216 | 
217 | ## Add folders to be run by python nosetests
218 | # catkin_add_nosetests(test)
219 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/ouster_bag_convert/package.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>ouster_bag_convert</name>
4 |   <version>0.0.0</version>
5 |   <description>The ouster_bag_convert package</description>
6 |   <maintainer email="ken@todo.todo">ken</maintainer>
7 |   <license>TODO</license>
8 | 
9 |   <buildtool_depend>catkin</buildtool_depend>
10 |   <build_depend>geometry_msgs</build_depend>
11 |   <build_depend>ouster_ros</build_depend>
12 |   <build_depend>roscpp</build_depend>
13 |   <build_depend>sensor_msgs</build_depend>
14 |   <build_depend>std_msgs</build_depend>
15 |   <build_export_depend>geometry_msgs</build_export_depend>
16 |   <build_export_depend>ouster_ros</build_export_depend>
17 |   <build_export_depend>roscpp</build_export_depend>
18 |   <build_export_depend>sensor_msgs</build_export_depend>
19 |   <build_export_depend>std_msgs</build_export_depend>
20 |   <exec_depend>geometry_msgs</exec_depend>
21 |   <exec_depend>ouster_ros</exec_depend>
22 |   <exec_depend>roscpp</exec_depend>
23 |   <exec_depend>sensor_msgs</exec_depend>
24 |   <exec_depend>std_msgs</exec_depend>
25 | 
26 |   <export>
27 |   </export>
28 | </package>
29 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/ouster_bag_convert/src/bag_rewriter.cpp: --------------------------------------------------------------------------------
1 | #include <rosbag/bag.h>
2 | #include <rosbag/view.h>
3 | #include <std_msgs/String.h>
4 | #include <sensor_msgs/Imu.h>
5 | #include <sensor_msgs/PointCloud2.h>
6 | #include <ros/ros.h>
7 | #include <boost/make_shared.hpp>
8 | 
9 | #include <ouster_ros/ros.h>
10 | 
11 | #include <ouster_ros/PacketMsg.h>
12 | 
13 | // Not defined in a header in ouster_ros...so copied
14 | inline ros::Time to_ros_time(uint64_t ts) {
15 |     ros::Time t;
16 |     t.fromNSec(ts);
17 |     return t;
18 | }
19 | 
20 | // Not defined in a header in ouster_ros...so copied
21 | inline ros::Time to_ros_time(std::chrono::nanoseconds ts) {
22 |     return to_ros_time(ts.count());
23 | }
24 | 
25 | std::string get_metadata(rosbag::Bag& bag){
26 |     // Grab single message on /os_node/metadata
27 |     std::string info;
28 | 
29 |     for(rosbag::MessageInstance const m: rosbag::View(bag, rosbag::TopicQuery("/os_node/metadata")))
30 |     {
31 |         std_msgs::String::ConstPtr str = m.instantiate<std_msgs::String>();
32 |         if (str != nullptr)
33 |             info = str->data;
34 |     }
35 | 
36 |     return info;
37 | }
38 | 
39 | sensor_msgs::ImuPtr PacketMsg_to_Imu(const ouster_ros::PacketMsg::ConstPtr& pm, ouster::sensor::sensor_info& info) {
40 |     // Follow the logic from os_cloud_node.cpp in the ouster_ros driver
41 |     // Specifically we use the sensor time code path
42 |     auto pf = ouster::sensor::get_format(info);
43 |     ros::Time msg_ts = to_ros_time(pf.imu_gyro_ts(pm->buf.data()));
44 |     sensor_msgs::Imu imu_msg = ouster_ros::packet_to_imu_msg(*pm, msg_ts, "os_imu", pf);
45 |     sensor_msgs::ImuPtr imu_msg_ptr =
46 |         boost::make_shared<sensor_msgs::Imu>(imu_msg);
47 |     return imu_msg_ptr;
48 | }
49 | 
50 | sensor_msgs::PointCloud2Ptr LidarScan_to_PointCloud(ouster::LidarScan& ls, ouster::XYZLut& xyz_lut, ouster::sensor::sensor_info& info) {
51 |     // Follow the logic from os_cloud_node.cpp in the ouster_ros driver
52 |     // Specifically we use the sensor time code path
53 |     sensor_msgs::PointCloud2Ptr pc_ptr;
54 | 
55 |     auto ts_v = ls.timestamp();
56 |     auto idx = std::find_if(ts_v.data(), ts_v.data() + ts_v.size(),
57 |                             [](uint64_t h) { return h != 0; });
58 |     if (idx == ts_v.data() + ts_v.size()) return pc_ptr;
59 |     auto scan_ts = std::chrono::nanoseconds{ts_v(idx - ts_v.data())};
60 |     ros::Time msg_ts = to_ros_time(scan_ts);
61 | 
62 |     uint32_t H = info.format.pixels_per_column;
63 |     uint32_t W = info.format.columns_per_frame;
64 |     ouster_ros::Cloud cloud{W,H};
65 | 
66 |     ouster_ros::scan_to_cloud(xyz_lut, scan_ts, ls, cloud, 0);
67 | 
68 |     sensor_msgs::PointCloud2 pc =
69 |         ouster_ros::cloud_to_cloud_msg(cloud, msg_ts, "os_lidar");
70 |     pc_ptr = boost::make_shared<sensor_msgs::PointCloud2>(pc);
71 | 
72 |     return pc_ptr;
73 | }
74 | 
75 | int main(int argc, char *argv[])
76 | {
77 |     if(argc < 3){
78 |         std::cout << "Usage: rosrun ouster_bag_convert ouster_bag_converter <read_bag> <write_bag>" << std::endl;
79 |         return 1;
80 |     }
81 | 
82 |     std::string read_bag_fn{argv[1]};
83 |     std::string write_bag_fn{argv[2]};
84 | 
85 |     std::cout << "=== Opening ===" << std::endl;
86 |     std::cout << " - Reading: " << read_bag_fn << std::endl;
87 |     std::cout << " - Writing: " << write_bag_fn << std::endl;
88 | 
89 |     // Read bag with topics to pull packets from
90 |     rosbag::Bag read_bag;
91 |     read_bag.open(read_bag_fn, rosbag::bagmode::Read);
92 |     const std::string read_topic_lidar{"/os_node/lidar_packets"};
93 |     const std::string read_topic_imu{"/os_node/imu_packets"};
94 | 
95 |     // Write bag with topics to write to
96 |     rosbag::Bag write_bag;
97 |     write_bag.open(write_bag_fn, rosbag::bagmode::Write);
98 |     const std::string write_topic_lidar{"/os_node/points"};
99 |     const std::string write_topic_imu{"/os_node/imu"};
100 | 
101 |     // Grab Ouster metadata from bag
102 |     std::string ouster_metadata = get_metadata(read_bag);
103 | 
104 |     ouster::sensor::sensor_info info = ouster::sensor::parse_metadata(ouster_metadata);
105 |     ouster::XYZLut xyz_lut = ouster::make_xyz_lut(info);
106 |     uint32_t H = info.format.pixels_per_column;
107 |     uint32_t W = info.format.columns_per_frame;
108 |     std::unique_ptr<ouster::ScanBatcher> scan_batcher = std::make_unique<ouster::ScanBatcher>(info);
109 |     ouster::LidarScan ls{W, H, info.format.udp_profile_lidar};
110 | 
111 |     std::cout << "Starting loop" << std::endl;
112 |     int i = 0;
113 | 
114 |     for(rosbag::MessageInstance const m: rosbag::View(read_bag, rosbag::TopicQuery({read_topic_lidar, read_topic_imu})))
115 |     {
116 |         ouster_ros::PacketMsg::ConstPtr pm = m.instantiate<ouster_ros::PacketMsg>();
117 | 
118 |         if(m.getTopic() == read_topic_imu) {
119 |             auto imu_ptr = PacketMsg_to_Imu(pm, info);
120 |             if(!imu_ptr)continue;
121 | 
122 |             write_bag.write(write_topic_imu, m.getTime(), imu_ptr);
123 | 
124 |         } else if(m.getTopic() == read_topic_lidar) {
125 |             if(!(*scan_batcher)(pm->buf.data(), ls))continue; // Not a full scan
126 |             auto pc_ptr = LidarScan_to_PointCloud(ls, xyz_lut, info);
127 |             if(!pc_ptr)continue;
128 | 
129 |             write_bag.write(write_topic_lidar, m.getTime(), pc_ptr);
130 |         }
131 | 
132 |         if (i++ % 1000 == 0) std::cout << "+" << std::flush;
133 |     }
134 |     std::cout << std::endl << "Done" << std::endl;
135 | 
136 |     read_bag.close();
137 |     write_bag.close();
138 | 
139 |     return 0;
140 | }
141 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/pcd2gt.bash: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ./build_system/preamble.bash
3 | 
4 | # Do not run this script for any calibration bags.
5 | if [[ "$BAG_NAME" == *"calib"* ]]; then 6 | echo -e "${YELLOW}Skipping calibration bag: $BAG_NAME${NC}" 7 | exit 0 8 | fi 9 | 10 | # output_files includes TRAJ_PATH, PCD_LOCAL_PATH, and GT_PATH 11 | output_files=("$GT_POSE_PATH" "$GT_DEPTH_PATH") 12 | check_files output_files 13 | check_free_space fixed 14 | 15 | # Generate GT 16 | echo -e "${BLUE}Starting GT generation${NC}" 17 | python3 build_system/lidar_depth/fasterlio_gt.py \ 18 | --pcd_folder "$PCD_LOCAL_PATH" \ 19 | --traj_fn "$TRAJ_PATH" \ 20 | --timesync_fn "$TIME_CORRECTIONS_PATH" \ 21 | --calib_h5 "$H5_PATH" \ 22 | --pose_h5_fn "$GT_POSE_PATH.tmp" \ 23 | --depth_h5_fn "$GT_DEPTH_PATH.tmp" \ 24 | --scan_aggregation "$DEPTH_SCAN_AGGREGATION" 25 | if [ $? -ne 0 ]; then 26 | echo -e "${RED}Error generating GT${NC}" 27 | exit 1 28 | fi 29 | echo -e "${GREEN}GT generation finished${NC}" 30 | 31 | # Move file to final destination 32 | mv "$GT_POSE_PATH.tmp" "$GT_POSE_PATH" 33 | mv "$GT_DEPTH_PATH.tmp" "$GT_DEPTH_PATH" 34 | -------------------------------------------------------------------------------- /build_system/lidar_depth/rosbag2oustermetadata.py: -------------------------------------------------------------------------------- 1 | import roslib 2 | import rospy 3 | import rosbag 4 | from sensor_msgs.msg import CompressedImage, Image 5 | 6 | import argparse 7 | import os 8 | import shutil 9 | 10 | import h5py 11 | import numpy as np 12 | import math 13 | import cv2 14 | from cv_bridge import CvBridge, CvBridgeError 15 | bridge = CvBridge() 16 | import pdb 17 | from collections import defaultdict 18 | 19 | from tqdm import tqdm 20 | 21 | def get_ouster_info(bag, topic="/os_node/metadata"): 22 | for topic, msg, t in bag.read_messages(topics=[topic]): 23 | ouster_metadata = msg.data 24 | return ouster_metadata 25 | 26 | def process_bag(filename, ouster_fn=None): 27 | # Open bag 28 | bag = rosbag.Bag(filename) 29 | metadata = get_ouster_info(bag) 30 | 31 | print("Saving metadata to -- %s" % ouster_fn) 32 | with open(ouster_fn, 'w') as f: 33 | f.write(metadata) 34 | 35 | if __name__ == "__main__": 36 | import argparse 37 | parser = argparse.ArgumentParser() 38 | parser.add_argument('--bag', help='ROS bag name') 39 | parser.add_argument('--ouster_fn', default="/tmp/ouster_metadata.json", help='Ouster Metadata Name') 40 | args = parser.parse_args() 41 | 42 | process_bag(args.bag, args.ouster_fn) 43 | -------------------------------------------------------------------------------- /build_system/lidar_depth/rosbag2pcd.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./build_system/preamble.bash 3 | 4 | # Do not run this script for any calibration bags. 5 | if [[ "$BAG_NAME" == *"calib"* ]]; then 6 | echo -e "${YELLOW}Skipping calibration bag: $BAG_NAME${NC}" 7 | exit 0 8 | fi 9 | 10 | # output_files includes TRAJ_PATH, PCD_LOCAL_PATH, and GT_PATH 11 | output_files=("$TRAJ_PATH" "$PCD_LOCAL_PATH" "$PCD_GLOBAL_PATH" "$PCD_LOCAL_PATH_COMPRESSED") 12 | check_files output_files 13 | 14 | check_free_space fixed 15 | 16 | # Link the ouster_bag_convert directory to the current ROS src dir and build 17 | if [ ! -L ~/catkin_ws/src/ouster_bag_convert ]; then 18 | ln -s "$(pwd)"/build_system/lidar_depth/ouster_bag_convert ~/catkin_ws/src/ouster_bag_convert 19 | fi 20 | # same for concatenate_pcd_uncompressed 21 | if [ ! 
-L ~/catkin_ws/src/concatenate_pcd_uncompressed ]; then 22 | ln -s "$(pwd)"/build_system/lidar_depth/concatenate_pcd_uncompressed ~/catkin_ws/src/concatenate_pcd_uncompressed 23 | fi 24 | pushd ~/catkin_ws/src 25 | catkin build --no-status 26 | popd 27 | . ~/catkin_ws/devel/setup.bash 28 | 29 | echo -e "${BLUE}Converting $BAG_NAME to $CONVERTED_BAG_PATH${NC}" 30 | rosrun ouster_bag_convert ouster_bag_converter "$BAG_PATH" "$CONVERTED_BAG_PATH" 31 | if [ $? -ne 0 ]; then 32 | echo -e "${RED}Error running ouster_bag_convert${NC}" 33 | exit 1 34 | fi 35 | echo -e "${GREEN}Conversion done${NC}" 36 | 37 | echo -e "${YELLOW}Original bag: $BAG_PATH${NC}" 38 | echo -e "${YELLOW}Converted bag: $CONVERTED_BAG_PATH${NC}" 39 | echo -e "${YELLOW}PCD global file: $PCD_GLOBAL_PATH${NC}" 40 | 41 | current_dir=$(pwd) 42 | # Create a Log dir where the trajectory can be saved 43 | mkdir -p Log 44 | roscd faster_lio 45 | mkdir -p PCD 46 | 47 | # Run Faster-LIO to get the individual PCs 48 | rm -rf "$PCD_LOCAL_PATH" 49 | cd "$current_dir" 50 | echo -e "${BLUE}FasterLIO individual${NC}" 51 | echo -e "${YELLOW}Config file: ./lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml${NC}" 52 | cat build_system/lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml 53 | rosrun faster_lio run_mapping_offline \ 54 | --bag_file "$CONVERTED_BAG_PATH" \ 55 | --config_file "./build_system/lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml" 56 | if [ $? -ne 0 ]; then 57 | echo -e "${RED}Error running FasterLIO local${NC}" 58 | exit 1 59 | fi 60 | echo -e "${GREEN}FasterLIO individual done${NC}" 61 | chmod 777 Log/traj.bin 62 | mv Log/traj.bin "$TRAJ_PATH" 63 | roscd faster_lio 64 | chmod 777 PCD 65 | chmod 666 PCD/* 66 | mv PCD "$PCD_LOCAL_PATH" 67 | 68 | # Create integrated global PCD 69 | # TODO: remove this once Docker build is fixed 70 | echo -e "${BLUE}Concatenate point clouds for global PC${NC}" 71 | roscd concatenate_pcd_uncompressed 72 | rosrun concatenate_pcd_uncompressed concatenate_pcd_uncompressed "$PCD_LOCAL_PATH"/*.pcd 73 | chmod 666 output.pcd 74 | mv output.pcd "$PCD_GLOBAL_PATH" 75 | 76 | # Compress local PCDs into a zip file 77 | echo -e "${BLUE}Compressing local PCs${NC}" 78 | PCD_LOCAL_PATH_COMPRESSED_TMP="${PCD_LOCAL_PATH_COMPRESSED%.*}_tmp.${PCD_LOCAL_PATH_COMPRESSED##*.}" 79 | tar cjf "$PCD_LOCAL_PATH_COMPRESSED_TMP" -C "$PCD_LOCAL_PATH" . 80 | if [ $? 
-ne 0 ]; then
81 |     echo -e "${RED}Error compressing local PCs${NC}"
82 |     exit 1
83 | fi
84 | echo -e "${GREEN}Compressing local PCs done${NC}"
85 | chmod 666 "$PCD_LOCAL_PATH_COMPRESSED_TMP"
86 | mv "$PCD_LOCAL_PATH_COMPRESSED_TMP" "$PCD_LOCAL_PATH_COMPRESSED"
87 | 
-------------------------------------------------------------------------------- /build_system/lidar_depth/short_range_ouster64.yaml: --------------------------------------------------------------------------------
1 | common:
2 |     lid_topic: "/os_node/points"
3 |     imu_topic: "/os_node/imu"
4 |     time_sync_en: false # ONLY turn on when external time synchronization is really not possible
5 | 
6 | preprocess:
7 |     lidar_type: 3 # 1 for Livox serials LiDAR, 2 for Velodyne LiDAR, 3 for ouster LiDAR,
8 |     scan_line: 64
9 |     blind: 1
10 |     time_scale: 1e-3
11 | 
12 | mapping:
13 |     acc_cov: 0.4
14 |     gyr_cov: 0.4
15 |     b_acc_cov: 0.0002
16 |     b_gyr_cov: 0.0002
17 |     fov_degree: 180
18 |     det_range: 150.0
19 |     extrinsic_est_en: false # true: enable the online estimation of IMU-LiDAR extrinsic
20 |     extrinsic_T: [ -0.00625, 0.01175, -0.007645 ]
21 |     extrinsic_R: [1, 0, 0,
22 |                   0, 1, 0,
23 |                   0, 0, 1]
24 | 
25 | publish:
26 |     path_publish_en: false
27 |     scan_publish_en: true # false: close all the point cloud output
28 |     scan_effect_pub_en: true # true: publish the point cloud of effective points
29 |     dense_publish_en: false # false: reduce the number of points in a global-frame point cloud scan
30 |     scan_bodyframe_pub_en: true # true: output the point cloud scans in IMU-body-frame
31 | 
32 | path_save_en: true
33 | 
34 | pcd_save:
35 |     pcd_save_en: true
36 |     interval: 1 # how many LiDAR frames saved in each pcd file;
37 |                 # -1 : all frames will be saved in ONE pcd file, which may lead to a memory crash when there are too many frames.
38 | feature_extract_enable: false
39 | point_filter_num: 3
40 | max_iteration: 4
41 | filter_size_surf: 0.10
42 | filter_size_map: 0.10 # currently unused; the code sets it to 0, i.e. all points of the downsampled scan are added to the map
43 | cube_side_length: 500
44 | 
45 | ivox_grid_resolution: 0.10 # default=0.2
46 | ivox_nearby_type: 26 # 6, 18, 26
47 | esti_plane_threshold: 0.1 # default=0.1
48 | 
-------------------------------------------------------------------------------- /build_system/preamble.bash: --------------------------------------------------------------------------------
1 | # Preamble file. It is sourced by every bash script to set up variables,
2 | # colors, and bash options
3 | 
4 | # ==================== MAIN PREAMBLE ==========================
5 | set -eo pipefail
6 | 
7 | # Colors
8 | RED='\033[0;31m'
9 | GREEN='\033[0;32m'
10 | BLUE='\033[0;34m'
11 | YELLOW='\e[1;33m'
12 | MAGENTA='\e[1;35m'
13 | NC='\033[0m' # No Color
14 | 
15 | # Check that we have one argument and it is not null, except for script
16 | # hdf52internimage.bash
17 | if [[ "$0" != *"hdf52internimage.bash"* ]]; then
18 |     if [ $# -ne 1 ]; then
19 |         echo "Usage: $0 bag_name"
20 |         exit 1
21 |     fi
22 |     if [ -z "$1" ]; then
23 |         echo "Usage: $0 bag_name"
24 |         exit 1
25 |     fi
26 | fi
27 | 
28 | # Load env variables from python
29 | eval $(python3 build_system/dataset_paths.py --bag_name "$1")
30 | 
31 | # Check that BAG_NAME is set, otherwise the eval may have failed
32 | if [ -z "${BAG_NAME+x}" ]; then
33 |     echo -e "${RED}Error: BAG_NAME variable not set.${NC}"
34 |     exit 1
35 | fi
36 | 
37 | # Load ROS env
38 | . ~/catkin_ws/devel/setup.bash
39 | 
40 | # Skip most scripts for "falcon_imu_calib" bags (except IMU calib), as the
41 | # synchronization was not performed correctly. This is not a big problem, as
42 | # the bags are only used for imu calibration (between imu and cameras)
43 | if [[ "$BAG_PATH" == *"falcon_imu_calib"* ]] && [[ "$0" != *"rosbag2imucalibration.bash"* ]]; then
44 |     echo -e "${YELLOW}Skipping script for $BAG_PATH${NC}"
45 |     echo -e "${YELLOW}These bags' synchronization is not right. $BAG_PATH${NC}"
46 |     echo -e "${YELLOW}This is not a problem, as the bags are only used for imu calibration${NC}"
47 |     exit 0
48 | fi
49 | 
50 | # Print the name of the script, the BAG_NAME and the start date in purple
51 | echo -e "${MAGENTA}===================================================${NC}"
52 | echo -e "${MAGENTA}$0 $BAG_NAME"
53 | echo -e "${MAGENTA}$(date)${NC}"
54 | echo -e "${MAGENTA}===================================================${NC}"
55 | 
56 | # ==================== FUNCTIONS ==============================
57 | 
58 | function check_files {
59 |     # Check that the output files do not already exist, and exit the script
60 |     # gracefully if they all do. An array with the list of files should be
61 |     # provided as an argument. If only some of the files exist (but not all
62 |     # of them), delete all the files in the array before proceeding.
63 |     local array_name=$1
64 | 
65 |     # Check that the array variable named by $1 exists
66 |     if [ -z "${!array_name+x}" ]; then
67 |         echo -e "${RED}$array_name is unset${NC}";
68 |         return 1
69 |     fi
70 | 
71 |     # An array of output_fns
72 |     local -n output_fns=$1
73 | 
74 |     # Variable to check if all files exist
75 |     local all_files_exist=1
76 | 
77 |     # Check that the variable output_fns exists
78 |     if [ -z "$output_fns" ]; then
79 |         echo -e "${RED}Error: output_fns variable not set.${NC}"
80 |         exit 1
81 |     fi
82 | 
83 |     # Check if each file exists
84 |     all_files_exist=1
85 |     for file in "${output_fns[@]}"
86 |     do
87 |         # check if file or directory exists
88 |         if [ ! -e "$file" ]; then
89 |             all_files_exist=0
90 |             echo -e "${YELLOW}File $file does not exist${NC}"
91 |         else
92 |             echo -e "${BLUE}File $file exists${NC}"
93 |         fi
94 |     done
95 | 
96 |     # If all files exist
97 |     if [ $all_files_exist -eq 1 ]; then
98 |         echo -e "${GREEN}All output files exist. Stopping script.${NC}"
99 |         exit 0
100 |     else
101 |         # If any file does not exist, delete all the other files
102 |         echo -e "${RED}Not all output files exist. Removing all output files...${NC}"
103 |         for file in "${output_fns[@]}"
104 |         do
105 |             if [ -e "$file" ]; then
106 |                 rm -rf "$file"
107 |                 echo -e "${BLUE}Deleted $file${NC}"
108 |                 rm -rf "$file.tmp"
109 |                 echo -e "${BLUE}Deleted $file.tmp${NC}"
110 |             fi
111 |         done
112 |     fi
113 | }
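# Illustrative usage sketch (comments only, not executed): a typical build
# script layered on this preamble follows the callers elsewhere in this repo,
# e.g. gt2media.bash. "$MY_OUTPUT" and some_step.py are hypothetical
# placeholders:
#
#   source ./build_system/preamble.bash
#   output_files=("$MY_OUTPUT")
#   check_files output_files              # exits 0 early if all outputs exist
#   check_free_space fixed                # require at least 50 GiB free
#   TMP="${MY_OUTPUT%.*}_tmp.${MY_OUTPUT##*.}"
#   python3 build_system/some_step.py --out "$TMP" || exit 1
#   mv "$TMP" "$MY_OUTPUT"                # publish the output atomically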
-e "$RAW_VIDEO" ]; then 131 | echo -e "${RED}Error: File $RAW_VIDEO does not exist.${NC}" 132 | return 1 133 | fi 134 | 135 | # if CREATE_MP4_VIDEOS is set, create mp4 video 136 | if [ $CREATE_MP4_VIDEOS -eq 1 ]; then 137 | # Remplace the .avi extension for MP4 138 | MP4_VIDEO="${RAW_VIDEO%.*}.mp4" 139 | # TMP variable where the temp video will be stored 140 | MP4_VIDEO_TMP="${MP4_VIDEO%.*}_tmp.${MP4_VIDEO##*.}" 141 | rm -f "$MP4_VIDEO_TMP" 142 | 143 | echo -e "${BLUE}Compressing $RAW_VIDEO to MP4${NC}" 144 | ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" \ 145 | -c:v libx264 -preset slow -crf 21 -pix_fmt yuv420p "$MP4_VIDEO_TMP" 146 | if [ $? -ne 0 ]; then 147 | echo -e "${RED}Error compressing $MP4_VIDEO_TMP${NC}" 148 | rm -f "$MP4_VIDEO_TMP" 149 | exit 1 150 | fi 151 | # Move the temp video to the final video 152 | chmod 666 "$MP4_VIDEO_TMP" 153 | mv "$MP4_VIDEO_TMP" "$MP4_VIDEO" 154 | else 155 | echo -e "${YELLOW}CREATE_MP4_VIDEOS is set to 0. Skipping MP4 video creation${NC}" 156 | fi 157 | 158 | # Same for webm 159 | if [ $CREATE_WEBM_VIDEOS -eq 1 ]; then 160 | # Remplace the .avi extension for wEBM 161 | WEBM_VIDEO="${RAW_VIDEO%.*}.webm" 162 | # TMP variable where the temp video will be stored 163 | WEBM_VIDEO_TMP="${WEBM_VIDEO%.*}_tmp.${WEBM_VIDEO##*.}" 164 | rm -f "$WEBM_VIDEO_TMP" 165 | 166 | echo -e "${BLUE}Compressing $RAW_VIDEO to WEBM${NC}" 167 | ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" -c:v libvpx-vp9 -threads 8 -row-mt 1 -b:v 0 -crf 30 -pass 1 -an -f null /dev/null && \ 168 | ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" -c:v libvpx-vp9 -threads 8 -row-mt 1 -b:v 0 -crf 30 -pass 2 -an "$WEBM_VIDEO_TMP" 169 | if [ $? -ne 0 ]; then 170 | echo -e "${RED}Error compressing $WEBM_VIDEO_TMP${NC}" 171 | rm -f "$WEBM_VIDEO_TMP" 172 | exit 1 173 | fi 174 | # Move the temp video to the final video 175 | chmod 666 "$WEBM_VIDEO_TMP" 176 | mv "$WEBM_VIDEO_TMP" "$WEBM_VIDEO" 177 | else 178 | echo -e "${YELLOW}CREATE_WEBM_VIDEOS is set to 0. Skipping MP4 video creation${NC}" 179 | fi 180 | } 181 | 182 | function check_free_space { 183 | # Check that we have enough free space before processing the file. The first 184 | # argument corresponds to the type of check: 185 | # - fixed: check that we have at least 50 GiB available 186 | # - bag_multiple: check that we have at least 5x the size of BAG_PATH 187 | 188 | local CHECK_TYPE=$1 189 | local FIXED_GB=$((50 * 1024 * 1024)) # 50GiB in KiB 190 | local AVAILABLE 191 | AVAILABLE=$(df "$OUTPUT_PATH" | tail -1 | awk '{print $4}') # Available space in KiB 192 | local BAG_SIZE 193 | BAG_SIZE=$(du -sk "$BAG_PATH" | cut -f1) # Size of bag file in KiB 194 | 195 | if [ "$CHECK_TYPE" == "fixed" ]; then 196 | if [ "$AVAILABLE" -lt "$FIXED_GB" ]; then 197 | echo -e "${RED}Not enough free space. Required: 50 GiB, Available: $((AVAILABLE / 1024 / 1024)) GiB${NC}" 198 | return 1 199 | fi 200 | elif [ "$CHECK_TYPE" == "bag_multiple" ]; then 201 | local REQUIRED_SPACE_BAG=$((BAG_SIZE * 5)) 202 | if [ "$AVAILABLE" -lt "$REQUIRED_SPACE_BAG" ]; then 203 | echo -e "${RED}Not enough free space. Required: $(($REQUIRED_SPACE_BAG / 1024 / 1024)) GiB, Available: $((AVAILABLE / 1024 / 1024)) GiB${NC}" 204 | return 1 205 | fi 206 | else 207 | echo -e "${RED}Invalid check type. 
Must be 'fixed' or 'bag_multiple'${NC}" 208 | return 1 209 | fi 210 | } 211 | -------------------------------------------------------------------------------- /build_system/semantics/hdf52internimage.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./build_system/preamble.bash 4 | 5 | # Check if the INTERNIMAGE_PATH is set, otherwise skip this file 6 | if [ -z "$INTERNIMAGE_PATH" ]; then 7 | echo -e "${YELLOW}INTERNIMAGE_PATH is not set, skipping this file${NC}" 8 | exit 0 9 | fi 10 | 11 | # This is a special script, check that we have two arguments 12 | if [ "$#" -ne 2 ]; then 13 | echo -e "${RED}Usage $0 bag_name gpu_to_run${NC}" 14 | exit 1 15 | fi 16 | 17 | output_files=("$INTERNIMAGE_PATH") 18 | check_files output_files 19 | check_free_space fixed 20 | 21 | # Add a .tmp to INTERNIMAGE_PATH for the temporary path 22 | INTERNIMAGE_PATH_TMP="$INTERNIMAGE_PATH.tmp" 23 | 24 | # GPU_TARGET is the second argument 25 | GPU_TARGET="$2" 26 | TOTAL_GPUS=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) 27 | 28 | # Check that GPU TARGET is a number between 0 and TOTAL_GPUS 29 | if ! [[ "$GPU_TARGET" =~ ^[0-9]+$ ]] ; then 30 | echo -e "${RED}GPU_TARGET is not a number${NC}" 31 | exit 1 32 | fi 33 | if [ "$GPU_TARGET" -lt 0 ] || [ "$GPU_TARGET" -ge "$TOTAL_GPUS" ]; then 34 | echo -e "${RED}GPU_TARGET is not between 0 and $TOTAL_GPUS${NC}" 35 | exit 1 36 | fi 37 | 38 | shift 2 # Remove the first and second argument, otherwise activate will complain 39 | 40 | # Activate the preexisting environment 41 | source ~/anaconda3/bin/activate 42 | conda activate internimage 43 | 44 | # Compile InternImage segmentation 45 | pushd ~/InternImage/segmentation/ops_dcnv3 46 | sh ./make.sh 47 | popd 48 | 49 | # Link Internimage to the semantics folder 50 | cd build_system/semantics/ 51 | # Create links only if they do not exists 52 | if [ ! -L configs ]; then 53 | ln -s ~/InternImage/segmentation/configs . 54 | fi 55 | if [ ! -L mmcv_custom ]; then 56 | ln -s ~/InternImage/segmentation/mmcv_custom . 57 | fi 58 | if [ ! -L mmseg_custom ]; then 59 | ln -s ~/InternImage/segmentation/mmseg_custom . 60 | fi 61 | if [ ! -L ops_dcnv3 ]; then 62 | ln -s ~/InternImage/segmentation/ops_dcnv3 . 63 | fi 64 | cd ../.. 65 | 66 | # run the script. 67 | echo -e "${BLUE}Running InternImage on GPU $GPU_TARGET${NC}" 68 | # Set CUDA env variables 69 | export CUDA_DEVICE_ORDER=PCI_BUS_ID 70 | export CUDA_VISIBLE_DEVICES="$GPU_TARGET" 71 | python build_system/semantics/internimage.py \ 72 | --h5fn "$H5_PATH" \ 73 | --config ./build_system/semantics/configs/cityscapes/upernet_internimage_l_512x1024_160k_cityscapes.py \ 74 | --checkpoint /M3ED_Build/input/InternImage/upernet_internimage_l_512x1024_160k_cityscapes.pth \ 75 | --palette ade20k \ 76 | --img_idx 1000 \ 77 | --opacity 1 \ 78 | --out_h5fn "$INTERNIMAGE_PATH_TMP" 79 | if [ $? -ne 0 ]; then 80 | echo -e "${RED}Error running InternImage${NC}" 81 | exit 1 82 | fi 83 | echo -e "${GREEN}Done InternImage${NC}" 84 | 85 | # Move the temp file to the final location 86 | mv "$INTERNIMAGE_PATH_TMP" "$INTERNIMAGE_PATH" 87 | -------------------------------------------------------------------------------- /build_system/semantics/internimage.py: -------------------------------------------------------------------------------- 1 | # Some of these code was written with the sample code for InternImage 2 | # Copyright (c) OpenMMLab. All rights reserved. 
3 | # See https://github.com/OpenGVLab/InternImage/blob/master/segmentation/image_demo.py 4 | 5 | import cv2 6 | import os 7 | import sys 8 | import numpy as np 9 | import h5py 10 | import pdb 11 | import argparse 12 | import subprocess 13 | from datetime import datetime 14 | 15 | import matplotlib.pyplot as plt 16 | 17 | from argparse import ArgumentParser 18 | 19 | 20 | def intern_image_model_load(args): 21 | import mmcv 22 | 23 | import mmcv_custom # noqa: F401,F403 24 | import mmseg_custom # noqa: F401,F403 25 | from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot 26 | from mmseg.core.evaluation import get_palette 27 | from mmcv.runner import load_checkpoint 28 | from mmseg.core import get_classes 29 | import cv2 30 | 31 | # build the model from a config file and a checkpoint file 32 | 33 | model = init_segmentor(args.config, checkpoint=None, device=args.device) 34 | checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') 35 | if 'CLASSES' in checkpoint.get('meta', {}): 36 | model.CLASSES = checkpoint['meta']['CLASSES'] 37 | else: 38 | model.CLASSES = get_classes(args.palette) 39 | 40 | return model 41 | 42 | def intern_image_model_inference(args, model, img): 43 | from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot 44 | result = inference_segmentor(model, img) 45 | return result 46 | 47 | def intern_image_inference_save(args, model, img, result, idx): 48 | import mmcv 49 | from mmseg.core.evaluation import get_palette 50 | if hasattr(model, 'module'): 51 | model = model.module 52 | img = model.show_result(img, result, palette=get_palette(args.palette), 53 | show=False, opacity=args.opacity) 54 | mmcv.mkdir_or_exist(args.out) 55 | outpath = os.path.join(args.out, "%05d.png" % idx) 56 | cv2.imwrite(outpath, img) 57 | 58 | def invert_map(F): 59 | # shape is (h, w, 2), an "xymap" 60 | (h, w) = F.shape[:2] 61 | I = np.zeros_like(F) 62 | I[:,:,1], I[:,:,0] = np.indices((h, w)) # identity map 63 | P = np.copy(I) 64 | for i in range(10): 65 | correction = I - cv2.remap(F, P, None, interpolation=cv2.INTER_LINEAR) 66 | P += correction * 0.5 67 | return P 68 | 69 | def load_remapping(target_group, source_group): 70 | target_T_to_prophesee_left = target_group['T_to_prophesee_left'][...] 71 | source_T_to_prophesee_left = source_group['T_to_prophesee_left'][...] 72 | 73 | source_T_target = source_T_to_prophesee_left @ np.linalg.inv( target_T_to_prophesee_left ) 74 | target_T_source = np.linalg.inv(source_T_target) 75 | 76 | target_model = target_group['camera_model'] 77 | target_dist_coeffs = target_group['distortion_coeffs'][...] 78 | target_dist_model = target_group['distortion_model'] 79 | target_intrinsics = target_group['intrinsics'][...] 80 | target_res = target_group['resolution'][...] 81 | target_Size = target_res 82 | 83 | target_K = np.eye(3) 84 | target_K[0,0] = target_intrinsics[0] 85 | target_K[1,1] = target_intrinsics[1] 86 | target_K[0,2] = target_intrinsics[2] 87 | target_K[1,2] = target_intrinsics[3] 88 | 89 | target_P = np.zeros((3,4)) 90 | target_P[:3,:3] = target_K 91 | 92 | source_model = source_group['camera_model'] 93 | source_dist_coeffs = source_group['distortion_coeffs'][...] 94 | source_dist_model = source_group['distortion_model'] 95 | source_intrinsics = source_group['intrinsics'][...] 96 | source_res = source_group['resolution'][...] 
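# The block below mirrors OpenCV stereo rectification: intrinsics are laid out
# as [fx, fy, cx, cy]; both cameras are given the target (prophesee) K, and
# source_P additionally carries the source->target translation scaled by the
# focal lengths, like the P2 matrix produced by cv2.stereoRectify.
# initUndistortRectifyMap then yields destination->source pixel maps for
# cv2.remap, and invert_map() fixed-point-iterates to recover the forward
# (source->destination) warp as well.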
97 | source_width, source_height = source_res 98 | source_Size = source_res 99 | 100 | source_K = np.eye(3) 101 | source_K[0,0] = source_intrinsics[0] 102 | source_K[1,1] = source_intrinsics[1] 103 | source_K[0,2] = source_intrinsics[2] 104 | source_K[1,2] = source_intrinsics[3] 105 | 106 | source_P = np.zeros((3,4)) 107 | source_P[:3,:3] = target_K 108 | source_P[0,3] = target_K[0,0] * target_T_source[0,3] 109 | source_P[1,3] = target_K[1,1] * target_T_source[1,3] 110 | 111 | target_R = target_T_source[:3,:3] # np.eye(3) 112 | source_R = np.eye(3) # target_T_source[:3,:3] 113 | 114 | print(target_R) 115 | print(source_R) 116 | print(target_P) 117 | print(source_P) 118 | 119 | map_target = np.stack(cv2.initUndistortRectifyMap(target_K, target_dist_coeffs, target_R, target_P, target_Size, cv2.CV_32FC1), axis=-1) 120 | map_source = np.stack(cv2.initUndistortRectifyMap(source_K, source_dist_coeffs, source_R, source_P, source_Size, cv2.CV_32FC1), axis=-1) 121 | inv_map_target = invert_map(map_target) 122 | inv_map_source = invert_map(map_source) 123 | 124 | return map_target, map_source, inv_map_target, inv_map_source 125 | 126 | def create_full_map_and_mask(source_map, target_inv_map): 127 | source_to_target_map = cv2.remap(source_map, target_inv_map, None, interpolation=cv2.INTER_LINEAR ) 128 | source_to_target_mask = cv2.remap(source_map, target_inv_map, None, interpolation=cv2.INTER_LINEAR, borderValue=-1 ) 129 | source_to_target_mask = source_to_target_mask[:,:,0] == -1 130 | 131 | return source_to_target_map, source_to_target_mask 132 | 133 | def remap_and_mask(remap_grid, remap_mask, img): 134 | img_remap = cv2.remap(img, remap_grid[:,:,0], remap_grid[:,:,1], cv2.INTER_LINEAR) 135 | img_remap[remap_mask] = 0 136 | 137 | if img_remap.ndim == 2: 138 | img_remap = img_remap[:,:,None] 139 | return img_remap 140 | 141 | def remap_mask(remap_grid, remap_mask, orig_resolution=None): 142 | if orig_resolution is None: 143 | orig_resolution = remap_grid[:,:,0].shape 144 | mask = np.ones( orig_resolution, dtype=np.uint8 ) * 255 145 | mask_remapped = remap_and_mask(remap_grid, remap_mask, mask) 146 | mask_remapped = cv2.medianBlur(mask_remapped,3) 147 | return mask_remapped 148 | 149 | def view_remap_topics(remap_grid, remap_mask, image_group, time_lookup, event_group, img_idx=2190): 150 | img = image_group['data'][img_idx] 151 | 152 | img_remap = remap_and_mask(remap_grid, remap_mask, img) 153 | 154 | start_idx = int(time_lookup[img_idx] - 200000) 155 | end_idx = int(time_lookup[img_idx] + 0) 156 | 157 | x = event_group['x'][start_idx:end_idx] 158 | y = event_group['y'][start_idx:end_idx] 159 | p = event_group['p'][start_idx:end_idx] 160 | eimg = np.zeros((720,1280,3)).astype(np.uint8) 161 | eimg[y.astype(int),x.astype(int),0] = 255 162 | 163 | fig, axes = plt.subplots(2,2,sharex=True,sharey=True) 164 | 165 | axes[0,0].imshow(img) 166 | axes[0,1].imshow(img_remap) 167 | axes[1,0].imshow(eimg) 168 | 169 | overlay = eimg.copy() 170 | if img_remap.shape[-1] == 3: 171 | overlay[:,:,2] = np.linalg.norm(img_remap, axis=-1) 172 | else: 173 | overlay[:,:,2] = img_remap.squeeze() 174 | axes[1,1].imshow(overlay) 175 | 176 | plt.show() 177 | 178 | if __name__ == "__main__": 179 | parser = argparse.ArgumentParser() 180 | 181 | parser.add_argument("--h5fn", help="H5 file to load") 182 | parser.add_argument("--out_h5fn", help="H5 file to save to") 183 | parser.add_argument("--target", default="prophesee", help="Camera to warp to") 184 | parser.add_argument("--img_idx", type=int, default=2000, help="Index to 
load") 185 | parser.add_argument('--config', help='Config file') 186 | parser.add_argument('--checkpoint', help='Checkpoint file') 187 | parser.add_argument('--out', type=str, default="demo", help='out dir') 188 | parser.add_argument("--verbose", action="store_true", 189 | help="Verbose output") 190 | parser.add_argument( 191 | '--device', default='cuda:0', help='Device used for inference') 192 | parser.add_argument( 193 | '--palette', 194 | default='ade20k', 195 | choices=['ade20k', 'cityscapes', 'cocostuff'], 196 | help='Color palette used for segmentation map') 197 | parser.add_argument( 198 | '--opacity', 199 | type=float, 200 | default=0.5, 201 | help='Opacity of painted segmentation map. In (0, 1] range.') 202 | 203 | args = parser.parse_args() 204 | 205 | h5f = h5py.File(args.h5fn, 'r') 206 | 207 | target_key = '/' + args.target + '/left' 208 | 209 | ovc_key = '/ovc/rgb' 210 | target_map, source_map, target_inv_map, source_inv_map = load_remapping( h5f[target_key]['calib'], h5f[ovc_key]['calib'] ) 211 | 212 | source_to_target_map, source_to_target_mask = create_full_map_and_mask(source_map, target_inv_map) 213 | 214 | # view_remap_topics(source_to_target_map, source_to_target_mask, h5f[ovc_key], h5f['/ovc/ts_map_prophesee_left_t'], h5f[target_key], img_idx=350) 215 | 216 | model = intern_image_model_load(args) 217 | h5f_out = h5py.File(args.out_h5fn, 'w') 218 | 219 | total_images = h5f[ovc_key]['data'].shape[0] 220 | remap_shape = source_to_target_map[:,:,0].shape 221 | 222 | semantics_shape = (total_images, remap_shape[0], remap_shape[1], 1) 223 | semantics_chunk = (1, remap_shape[0], remap_shape[1], 1) 224 | if "rgb" in ovc_key: 225 | warped_shape = (total_images, remap_shape[0], remap_shape[1], 3) 226 | warped_chunk = (1, remap_shape[0], remap_shape[1], 3) 227 | else: 228 | warped_shape = (total_images, remap_shape[0], remap_shape[1], 1) 229 | warped_chunk = (1, remap_shape[0], remap_shape[1], 1) 230 | 231 | h5f_out.attrs["origin_camera"] = ovc_key 232 | h5f_out.attrs["destination_camera"] = target_key 233 | 234 | version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() 235 | h5f_out.attrs["version"] = version 236 | h5f_out.attrs["creation_date"] = str(datetime.now()) 237 | 238 | 239 | h5f_out.create_dataset("ts", data=h5f['/ovc/ts']) 240 | h5f_out.create_dataset("ts_map_prophesee_left_t", data=h5f['/ovc/ts_map_prophesee_left_t']) 241 | 242 | h5f_out.create_dataset("warped_image", warped_shape, dtype='u1', chunks=warped_chunk, compression='lzf') 243 | h5f_out.create_dataset("predictions", semantics_shape, dtype='u1', chunks=semantics_chunk, compression='lzf') 244 | 245 | mask = remap_mask(source_to_target_map, source_to_target_mask) 246 | mask = (255.0 - mask).astype(np.uint8) 247 | mask = mask > 0 248 | 249 | from tqdm import tqdm 250 | for idx in tqdm(range(total_images), total=total_images, disable=not args.verbose): 251 | ovc_image = h5f[ovc_key]['data'][idx] 252 | ovc_image_remapped = remap_and_mask(source_to_target_map, source_to_target_mask, ovc_image) 253 | if ovc_image_remapped.shape[-1] == 1: 254 | input_image = np.tile(ovc_image_remapped, (1,1,3)) 255 | else: 256 | input_image = ovc_image_remapped 257 | 258 | input_image[mask] = 0 259 | 260 | result = intern_image_model_inference(args, model, input_image) 261 | result = result[0][:,:,None] 262 | masked_result = result 263 | masked_result[mask] = 255 264 | 265 | if "rgb" in ovc_key: 266 | h5f_out['warped_image'][idx] = ovc_image_remapped 267 | else: 268 | 
h5f_out['warped_image'][idx] = ovc_image_remapped 269 | 270 | h5f_out['predictions'][idx] = masked_result 271 | -------------------------------------------------------------------------------- /build_system/semantics/internimage2media.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./build_system/preamble.bash 3 | 4 | output_files=("$SEMANTICS_VIDEO_RAW") 5 | check_files output_files 6 | 7 | check_free_space fixed 8 | 9 | SEMANTICS_VIDEO_RAW_TMP="${SEMANTICS_VIDEO_RAW%.*}_tmp.${SEMANTICS_VIDEO_RAW##*.}" 10 | 11 | # Run media generation 12 | echo -e "${BLUE}Starting semantics video generation${NC}" 13 | python3 build_system/semantics/internimage2media.py \ 14 | --semantics_h5 "$INTERNIMAGE_PATH" \ 15 | --events_h5 "$H5_PATH" \ 16 | --outfn "$SEMANTICS_VIDEO_RAW_TMP" 17 | # Check if program exited with error 18 | if [ $? -ne 0 ]; then 19 | echo -e "${RED}Error creating media files for $H5_PATH${NC}" 20 | exit 1 21 | fi 22 | mv "$SEMANTICS_VIDEO_RAW_TMP" "$SEMANTICS_VIDEO_RAW" 23 | echo -e "${GREEN}Semantics raw created${NC}" 24 | 25 | # Compress video 26 | echo -e "${BLUE}Compressing${NC}" 27 | compress_with_ffmpeg "$SEMANTICS_VIDEO_RAW" 28 | echo -e "${GREEN}Compression finished${NC}" 29 | -------------------------------------------------------------------------------- /build_system/semantics/internimage2media.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import sys 4 | import yaml 5 | import pdb 6 | import numpy as np 7 | import cv2 8 | import h5py 9 | from matplotlib import cm 10 | import matplotlib.pyplot as plt 11 | from colorama import Fore, Style 12 | 13 | # https://github.com/open-mmlab/mmsegmentation/blob/00790766aff22bd6470dbbd9e89ea40685008395/mmseg/utils/class_names.py#L249C1-L249C1 14 | def cityscapes_palette(): 15 | """Cityscapes palette for external use.""" 16 | return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], 17 | [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], 18 | [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], 19 | [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], 20 | [0, 0, 230], [119, 11, 32]] 21 | 22 | def generate_rgb_check_video(ap): 23 | # get all the semantic labels 24 | 25 | img_w = 1280 26 | img_h = 720 27 | 28 | out_w = img_w * 3 29 | out_h = img_h 30 | fps = 25 31 | 32 | # Initialize the video writer 33 | fourcc = cv2.VideoWriter_fourcc(*"FFV1") 34 | fn_parts = os.path.splitext(ap.outfn) 35 | filename = ''.join([fn_parts[0], '_rgb_check', fn_parts[1]]) 36 | video_writer = cv2.VideoWriter(filename, fourcc, 37 | fps, (out_w, out_h), 38 | isColor=True) 39 | print(Fore.BLUE + "Loading semantics and writing video" + Style.RESET_ALL) 40 | palette = np.array(cityscapes_palette()) 41 | # Add a last class to palette, for the background 42 | palette = np.vstack((palette, np.array([0, 0, 0]))) 43 | 44 | with h5py.File(ap.events_h5, "r") as f, h5py.File(ap.semantics_h5, "r") as sems: 45 | # Read the events dataset 46 | # events_dataset = f["/depth/prophesee/left"][:] 47 | predictions = sems["/predictions"] 48 | ts = sems["/ts"] 49 | warped_image = sems["/warped_image"] 50 | rgb_image = f["/ovc/rgb/data"] 51 | 52 | for i in range(len(predictions)-1): 53 | # remap all the 255 to 19 so they are plotted black 54 | img = predictions[i] 55 | img[img == 255] = 19 56 | color_img = palette[img] 57 | color_img = color_img.squeeze(2).astype(np.uint8) 58 | 59 | 
write_frame = np.zeros((out_h, out_w, 3), dtype=np.uint8)
60 | 
61 |             write_frame[:,0*img_w:1*img_w,:] = warped_image[i,:img_h,:img_w,:]
62 |             write_frame[:,1*img_w:2*img_w,:] = color_img[:img_h,:img_w,:]
63 |             write_frame[:,2*img_w:3*img_w,:] = rgb_image[i,:img_h,:img_w,:]
64 | 
65 |             video_writer.write(write_frame)
66 | 
67 |             if ap.debug:
68 |                 print(Fore.GREEN + "Writing frame: {}".format(i) +
69 |                       Style.RESET_ALL, end="\r")
70 |     video_writer.release()
71 | 
72 | def generate_event_overlay_video(ap):
73 |     # Overlay events on top of the colored semantic predictions
74 | 
75 |     width = 1280
76 |     height = 720
77 |     fps = 25
78 | 
79 |     # Initialize the video writer
80 |     fourcc = cv2.VideoWriter_fourcc(*"FFV1")
81 |     video_writer = cv2.VideoWriter(ap.outfn, fourcc,
82 |                                    fps, (width, height),
83 |                                    isColor=True)
84 |     print(Fore.BLUE + "Loading semantics and writing video" + Style.RESET_ALL)
85 |     palette = np.array(cityscapes_palette())
86 |     # Add a last class to palette, for the background
87 |     palette = np.vstack((palette, np.array([0, 0, 0])))
88 | 
89 |     with h5py.File(ap.events_h5, "r") as f, h5py.File(ap.semantics_h5, "r") as sems:
90 |         # Read the events dataset
91 |         # events_dataset = f["/depth/prophesee/left"][:]
92 |         pl = f["/prophesee/left"]
93 |         predictions = sems["/predictions"]
94 |         ts = sems["/ts"]
95 |         ts_map_prophesee_left_t = sems["/ts_map_prophesee_left_t"]
96 |         warped_image = sems["/warped_image"]
97 | 
98 |         for i in range(len(predictions)-1):
99 |             # remap all the 255 to 19 so they are plotted black
100 |             img = predictions[i]
101 |             img[img == 255] = 19
102 |             color_img = palette[img]
103 |             color_img = color_img.squeeze(2).astype(np.uint8)
104 | 
105 |             # Get the events for the corresponding frame
106 |             ev_start, ev_stop = ts_map_prophesee_left_t[i], ts_map_prophesee_left_t[i + 1]
107 |             ev_x = pl["x"][ev_start:ev_stop]
108 |             ev_y = pl["y"][ev_start:ev_stop]
109 |             ev_p = pl["p"][ev_start:ev_stop]
110 |             ev_t = pl["t"][ev_start:ev_stop]
111 |             min_t = ev_t.min()
112 |             max_t = min_t + 5e3
113 |             idx = np.logical_and(ev_t >= min_t, ev_t <= max_t)
114 |             ev_x = ev_x[idx]
115 |             ev_y = ev_y[idx]
116 |             ev_p = ev_p[idx]*255
117 |             ev_t = ev_t[idx]
118 | 
119 |             # Color events in video
120 |             color = np.array([ev_p, np.zeros_like(ev_p), 255-ev_p])
121 |             color = color//2
122 | 
123 |             # Merge them
124 |             color_img[ev_y, ev_x, :] = color.T
125 |             color_img_bgr = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)  # palette is RGB; VideoWriter expects BGR
126 |             video_writer.write(color_img_bgr)
127 | 
128 |             if ap.debug:
129 |                 print(Fore.GREEN + "Writing frame: {}".format(i) +
130 |                       Style.RESET_ALL, end="\r")
131 |     video_writer.release()
132 | 
133 | if __name__ == "__main__":
134 |     import argparse
135 |     ap = argparse.ArgumentParser()
136 |     ap.add_argument("--events_h5", help="Input HDF5 file with camera data", required=True)
137 |     ap.add_argument("--semantics_h5", help="Input HDF5 file from InternImage", required=True)
138 |     ap.add_argument("--outfn", help="Output video file", required=True)
139 |     # Add a debug flag argument
140 |     ap.add_argument("--debug", help="Debug flag", action="store_true")
141 |     ap = ap.parse_args()
142 | 
143 |     # Check that the input files exist
144 |     if not os.path.exists(ap.semantics_h5):
145 |         sys.exit("Input file does not exist: {}".format(ap.semantics_h5))
146 | 
147 |     if not os.path.exists(ap.events_h5):
148 |         sys.exit("Input file does not exist: {}".format(ap.events_h5))
149 | 
150 |     generate_event_overlay_video(ap)
151 |     # 
generate_rgb_check_video(ap)
152 | 
--------------------------------------------------------------------------------
/build_system/stats_and_summary/dataset_statistics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import yaml
5 | import pdb
6 | import numpy as np
7 | from datetime import datetime
8 | from colorama import Fore, Back, Style
9 | 
10 | 
11 | class Statistics:
12 |     def __init__(self, statistics_file, insert=None, duration=None):
13 |         self.fn = statistics_file
14 | 
15 |         if insert is None:
16 |             # Parse the yaml file
17 |             with open(self.fn, 'r') as f:
18 |                 self.statistics = yaml.load(f, Loader=yaml.FullLoader)
19 | 
20 |             self.num_event_left = self.statistics["events_left"]
21 |             self.num_event_right = self.statistics["events_right"]
22 |             self.num_img_left = self.statistics["ovc_left_images"]
23 |             self.num_img_right = self.statistics["ovc_right_images"]
24 | 
25 |             # Extract the start time
26 |             date_time_str = self.statistics["start_date"].split(" (")[0]
27 |             self.start_time = datetime.strptime(date_time_str,
28 |                                                 "%b %d %Y %H:%M:%S.%f")
29 | 
30 |             time_str = self.statistics["duration"]
31 |             if '(' in time_str and ')' in time_str:
32 |                 seconds = time_str.split('(')[1].split(')')[0]
33 |             else:
34 |                 seconds = time_str[:-1]
35 | 
36 |             # Remove the 's' at the end, if it exists
37 |             self.duration = float(seconds[:-1]) if seconds[-1] == 's' else float(seconds)
38 |         else:
39 |             assert duration is not None
40 |             self.duration = duration
41 |             assert self.duration != 0
42 |             self.num_event_left = 0
43 |             self.num_event_right = 0
44 | 
45 | 
46 | if __name__ == '__main__':
47 |     import argparse
48 |     argparser = argparse.ArgumentParser()
49 |     argparser.add_argument('--output_folder', type=str, required=True,
50 |                            help='Folder where the statistics are stored')
51 |     args = argparser.parse_args()
52 | 
53 |     # Check that the statistics folder exists and it is not empty
54 |     if not os.path.exists(args.output_folder):
55 |         sys.exit('The statistics folder does not exist')
56 |     if len(os.listdir(args.output_folder)) == 0:
57 |         sys.exit('The statistics folder is empty')
58 | 
59 |     all_stats = []
60 | 
61 |     # List all the statistics files and iterate over them
62 |     for file in os.listdir(args.output_folder):
63 |         # Skip all the calib files for statistics
64 |         if "calib" in file:
65 |             continue
66 |         full_path = os.path.join(args.output_folder, file,
67 |                                  file + ".yaml")
68 |         # Check if the path exists
69 |         if not os.path.exists(full_path):
70 |             print(Fore.RED + f"File {full_path} does not exist" + Fore.RESET)
71 |             continue
72 |         s = Statistics(full_path)
73 |         all_stats.append(s)
74 | 
75 |     # all_stats.append(Statistics("falcon_pennov_outdoor_flight_parking_2",
76 |     #                             insert=True, duration=109.0))
77 |     # all_stats.append(Statistics("falcon_pennov_outdoor_flight_parking_2",
78 |     #                             insert=True, duration=113.0))
79 |     # all_stats.append(Statistics("falcon_pennov_outdoor_vision_1",
80 |     #                             insert=True, duration=69.0))
81 | 
82 |     environments = ["urban", "outdoor", "indoor", "forest"]
83 |     vehicles = ["car", "falcon", "spot"]
84 |     stats_summary = {
85 |         vehicle: {
86 |             environment: {"count": 0, "duration": 0}
87 |             for environment in environments
88 |         }
89 |         for vehicle in vehicles
90 |     }
91 | 
92 |     total_time = 0
93 |     total_events = 0
94 |     processed_bags = 0
95 |     for stat in all_stats:
96 |         if stat.duration > 200:
97 |             # Print the name in yellow
98 |             print(Fore.YELLOW + "Big file:", stat.fn, "- Total time:",
99 |                   str(stat.duration), Fore.RESET)
100 
| 
101 |         # Identify the environment and vehicle from the filename
102 |         environment = None
103 |         vehicle = None
104 |         for env in environments:
105 |             if env in stat.fn:
106 |                 environment = env
107 |                 break
108 |         for veh in vehicles:
109 |             if veh in stat.fn:
110 |                 vehicle = veh
111 |                 break
112 | 
113 |         if vehicle is None:
114 |             raise ValueError(f"Unknown vehicle in dataset: {stat.fn}")
115 |         if environment is None:
116 |             raise ValueError(f"Unknown environment in dataset: {stat.fn}")
117 | 
118 |         # Update the stats_summary dictionary based on the environment and vehicle
119 |         stats_summary[vehicle][environment]["count"] += 1
120 |         stats_summary[vehicle][environment]["duration"] += stat.duration
121 | 
122 |         total_time += stat.duration
123 |         total_events += stat.num_event_left + stat.num_event_right
124 |         processed_bags += 1
125 | 
126 |     # Print the table
127 |     print(f"{'Vehicle': <9}{'Environment': <16}{'Total Sequences': <18}{'Test Sequences': <18}Total Time")
128 |     for vehicle in vehicles:
129 |         for environment in environments:
130 |             count = stats_summary[vehicle][environment]["count"]
131 |             test = int(count * .25)
132 |             duration = stats_summary[vehicle][environment]["duration"]
133 |             if count == 0:
134 |                 continue
135 |             print(f"{vehicle: <9}{environment: <16}{count: <18}{test: <18}{duration: <.2f}")
136 | 
137 | 
138 |     print(f"Processed {processed_bags} bags")
139 | 
140 |     # Print the total number of events with thousands separators
141 |     print(f"Total number of events: {total_events:,}")
142 | 
143 |     # Print the total time in minutes and in seconds
144 |     print(f"Total time: {total_time / 60:.2f} minutes, {total_time:.2f} seconds")
145 | 
--------------------------------------------------------------------------------
/build_system/stats_and_summary/getStats.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ./build_system/preamble.bash
3 | 
4 | output_files=("$STATS_PATH")
5 | check_files output_files
6 | 
7 | echo -e "${YELLOW}Getting stats from bag file: ${BOLD}${BAG_NAME}${NC}"
8 | echo -e "${YELLOW}H5 file: ${BOLD}${H5_PATH}${NC}"
9 | 
10 | ROS_INFO=$(rosbag info "${BAG_PATH}")
11 | # Get the start date using rosbag info and grep
12 | start_date=$(echo "${ROS_INFO}" | grep -oP 'start\s*:\s*\K.+')
13 | # Get the duration using rosbag info and grep
14 | duration=$(echo "${ROS_INFO}"| grep -oP 'duration\s*:\s*\K.+')
15 | # Get the raw bag file size
16 | bag_file_size_kb=$(du -k "${BAG_PATH}" | cut -f1)
17 | 
18 | # Statistics from the H5 file
19 | h5_file_size_kb=$(du -k "${H5_PATH}" | cut -f1)
20 | # if this is a bag that has calib and lidar in the name, the number of events is zero
21 | if [[ $BAG_PATH == *"calib"* ]] && [[ $BAG_PATH == *"lidar"* ]]; then
22 |   events_left=0
23 |   events_right=0
24 | else
25 |   events_left=$(h5ls -r "${H5_PATH}" | grep "prophesee/left/p" | awk '{print $3}'|sed 's/{\([0-9]*\)\/Inf}/\1/')
26 |   events_right=$(h5ls -r "${H5_PATH}" | grep "prophesee/right/p" | awk '{print $3}'|sed 's/{\([0-9]*\)\/Inf}/\1/')
27 | fi
28 | ovc_left=$(h5ls -r "${H5_PATH}" | grep "ovc/left/data" | awk '{print $3}'|sed 's/{\([0-9]*\)\/.*/\1/')
29 | ovc_right=$(h5ls -r "${H5_PATH}" | grep "ovc/right/data" | awk '{print $3}'|sed 's/{\([0-9]*\)\/.*/\1/')
30 | 
31 | # Check that all the statistics are not empty
32 | if [ -z "$start_date" ] || [ -z "$duration" ] || [ -z "$bag_file_size_kb" ] || [ -z "$h5_file_size_kb" ] || [ -z "$events_left" ] || [ -z "$events_right" ] || [ -z "$ovc_left" ] || [ -z "$ovc_right" ]; then
33 |   echo -e "${RED}Some stats are 
empty${NC}" 34 | exit 1 35 | fi 36 | 37 | # Write the start date and duration to a YAML file 38 | cat > "${STATS_PATH}" << EOL 39 | start_date: ${start_date} 40 | duration: ${duration} 41 | raw_bag_file_size_kb: ${bag_file_size_kb} 42 | h5_file_size_kb: ${h5_file_size_kb} 43 | events_left: ${events_left} 44 | events_right: ${events_right} 45 | ovc_left_images: ${ovc_left} 46 | ovc_right_images: ${ovc_right} 47 | EOL 48 | 49 | chmod 666 "${STATS_PATH}" 50 | 51 | echo -e "${YELLOW}Stats written to: ${BOLD}${STATS_PATH}${NC}" 52 | 53 | # Create validation images for the LiDAR calibration for non "calib" bags 54 | # if [[ $BAG_PATH != *"calib"* ]]; then 55 | # python3 lidar/lidar_calib.py \ 56 | # --h5fn "$H5_PATH" --confirm_only \ 57 | # --percentage 0.25 --confirm_fn "$TMP_PATH/calib_validation1.png" \ 58 | # --npz_fn "$CALIB_LIDAR_PATH" 59 | # python3 lidar/lidar_calib.py \ 60 | # --h5fn "$H5_PATH" --confirm_only \ 61 | # --percentage 0.5 --confirm_fn "$TMP_PATH/calib_validation2.png" \ 62 | # --npz_fn "$CALIB_LIDAR_PATH" 63 | # python3 lidar/lidar_calib.py \ 64 | # --h5fn "$H5_PATH" --confirm_only \ 65 | # --percentage 0.75 --confirm_fn "$TMP_PATH/calib_validation3.png" \ 66 | # --npz_fn "$CALIB_LIDAR_PATH" 67 | # fi 68 | -------------------------------------------------------------------------------- /event_loading_by_ms.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import argparse 3 | 4 | parser = argparse.ArgumentParser() 5 | 6 | parser.add_argument("--data_h5", required=True, help="H5 file path with sensor data") 7 | parser.add_argument("--ms", required=True, type=int, default=100, help="Millisecond to load from") 8 | 9 | args = parser.parse_args() 10 | 11 | # Start by loading a file through h5py. The compression used is available within the base h5py package. 12 | f = h5py.File(args.data_h5,'r') 13 | 14 | # Get the locations for starting at 1000 ms and stopping at 1001 ms 15 | event_start_idx = f['/prophesee/left/ms_map_idx'][args.ms] 16 | event_stop_idx = f['/prophesee/left/ms_map_idx'][args.ms+1] 17 | 18 | # Load 200k events surrounding the central timestamp 19 | left_events_x = f['/prophesee/left/x'][event_start_idx:event_stop_idx] 20 | left_events_y = f['/prophesee/left/y'][event_start_idx:event_stop_idx] 21 | left_events_t = f['/prophesee/left/t'][event_start_idx:event_stop_idx] 22 | left_events_p = f['/prophesee/left/p'][event_start_idx:event_stop_idx] 23 | -------------------------------------------------------------------------------- /image_and_event_loading.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import argparse 3 | 4 | parser = argparse.ArgumentParser() 5 | 6 | parser.add_argument("--data_h5", required=True, help="H5 file path with sensor data") 7 | parser.add_argument("--idx", required=True, type=int, default=100, help="Image index to load") 8 | parser.add_argument("--n_events", required=True, type=int, default=200000, help="Number of events to load (centered on the image)") 9 | 10 | # Start by loading a file through h5py. The compression used is available within the base h5py package. 
11 | args = parser.parse_args()
12 | 
13 | f = h5py.File(args.data_h5,'r')
14 | 
15 | # Grab the image itself
16 | left_image = f['/ovc/left/data'][args.idx]
17 | 
18 | # Find the index in the event stream that correlates with the image time
19 | left_event_idx = f['/ovc/ts_map_prophesee_left_t'][args.idx]
20 | 
21 | # Compute the start and stop index within the event stream
22 | half_n_events = args.n_events // 2
23 | start = left_event_idx - half_n_events
24 | stop = left_event_idx + half_n_events
25 | 
26 | # Load the n_events events surrounding the central timestamp
27 | left_events_x = f['/prophesee/left/x'][start:stop]
28 | left_events_y = f['/prophesee/left/y'][start:stop]
29 | left_events_t = f['/prophesee/left/t'][start:stop]
30 | left_events_p = f['/prophesee/left/p'][start:stop]
31 | 
--------------------------------------------------------------------------------
/lidar_loading.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | 
4 | from ouster import client, pcap
5 | from ouster.sdk.examples.open3d import colorize
6 | 
7 | import open3d as o3d
8 | 
9 | import h5py
10 | 
11 | if __name__ == "__main__":
12 |     parser = argparse.ArgumentParser()
13 |     parser.add_argument('--data_h5', required=True, help="The h5 file to load")
14 |     parser.add_argument('--out_file', help="The file to save the cloud into", default='/tmp/test.pcd')
15 | 
16 |     args = parser.parse_args()
17 | 
18 |     f = h5py.File(args.data_h5, 'r')
19 | 
20 |     ouster_sweep = f['/ouster/data'][0][...]
21 |     ouster_metadata_str = f['/ouster/metadata'][...].tolist()
22 | 
23 |     metadata = client.SensorInfo(ouster_metadata_str)
24 | 
25 |     # precompute xyzlut to save computation in a loop
26 |     xyzlut = client.XYZLut(metadata)
27 | 
28 |     packets = client.Packets([client.LidarPacket(opacket, metadata) for opacket in ouster_sweep], metadata)
29 |     scan = next(iter(client.Scans(packets)))
30 | 
31 |     xyz = xyzlut(scan.field(client.ChanField.RANGE))
32 |     color = colorize(scan.field(client.ChanField.SIGNAL))  # SIGNAL or REFLECTIVITY
33 | 
34 |     pcd = o3d.geometry.PointCloud()
35 |     pcd.points = o3d.utility.Vector3dVector(xyz.reshape(-1,3))
36 |     pcd.colors = o3d.utility.Vector3dVector(color.reshape((-1,3)))
37 | 
38 |     o3d.io.write_point_cloud(args.out_file, pcd)
39 | 
40 |     o3d.visualization.draw_geometries([pcd])
41 | 
--------------------------------------------------------------------------------
/tools/download_m3ed.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import yaml
3 | import requests
4 | import sys
5 | 
6 | import os
7 | import tqdm
8 | 
9 | BASE_URL = "https://m3ed-dist.s3.us-west-2.amazonaws.com"
10 | DATASETS_LIST_URL = "https://raw.githubusercontent.com/daniilidis-group/m3ed/main/dataset_list.yaml"
11 | 
12 | 
13 | def aws_url(filename, suffix):
14 |     return f"{BASE_URL}/processed/{filename}/{filename}_{suffix}"
15 | 
16 | 
17 | def download_file_with_progress(url, local_path):
18 |     response = requests.get(url, stream=True)
19 |     # Get the total file size from the response headers
20 |     total_size = int(response.headers.get("content-length", 0))
21 |     with open(local_path, "wb") as local_file, tqdm.tqdm(
22 |         desc=local_path.split("/")[-1],
23 |         total=total_size,
24 |         unit="B",
25 |         unit_scale=True,
26 |         unit_divisor=1024,
27 |     ) as progress_bar:
28 |         for data in response.iter_content(chunk_size=1024):
29 |             local_file.write(data)
30 |             progress_bar.update(len(data))
31 | 
32 | 
33 | class M3ED_File():
34 |     def __init__(self, filename):
35 |         
self.filename = filename
36 |         self.download_links = None
37 | 
38 |     def download(self, output_dir, to_download):
39 |         self.check_download(to_download)  # Raises if the links were never initialized
40 |         for download in self.download_links:
41 |             if download not in to_download:
42 |                 continue
43 |             link = self.download_links[download]
44 |             filename = link.split("/")[-1]
45 |             path = os.path.join(output_dir, self.filename)
46 |             # Create directory if it does not exist
47 |             if not os.path.exists(path):
48 |                 os.makedirs(path)
49 |             filepath = os.path.join(path, filename)
50 |             print(f"Downloading {filename} into {filepath}")
51 |             download_file_with_progress(link, filepath)
52 | 
53 |     def check_download(self, to_download):
54 |         if self.download_links is None:
55 |             raise ValueError("Download links not initialized")
56 |         # Return True if none of the available links match to_download
57 |         empty = True
58 |         for download in self.download_links:
59 |             if download in to_download:
60 |                 empty = False
61 |                 break
62 | 
63 |         return empty
64 | 
65 | 
66 | class M3ED_Data_File(M3ED_File):
67 |     def __init__(self, filename, config_file):
68 |         super().__init__(filename)
69 |         # config_file is this file's entry parsed from dataset_list.yaml
70 | 
71 |         # Initialize an empty dictionary to store the download links
72 |         self.download_links = {}
73 | 
74 |         # Assign all the links directly to the dictionary
75 |         self.download_links["data"] = aws_url(filename, "data.h5")
76 |         if config_file["is_test_file"]:
77 |             return
78 | 
79 |         self.download_links["data_videos_gray"] = aws_url(filename,
80 |                                                           "events_gray.avi")
81 |         self.download_links["data_videos_rgb"] = aws_url(filename, "rgb.avi")
82 |         self.download_links["depth_gt"] = aws_url(filename, "depth_gt.h5")
83 |         self.download_links["pose_gt"] = aws_url(filename, "pose_gt.h5")
84 |         self.download_links["gt_vids_depth"] = aws_url(filename,
85 |                                                        "depth_gt.avi")
86 |         self.download_links["gt_vids_events"] = aws_url(filename,
87 |                                                         "depth_gt_events.avi")
88 |         self.download_links["global_pcd"] = aws_url(filename, "global.pcd")
89 |         raw = f"{BASE_URL}/input/raw_data/{filename}.bag"
90 |         self.download_links["raw_data"] = raw
91 |         if not config_file["internimage_semantics"]:
92 |             return
93 |         self.download_links["semantics"] = aws_url(filename, "semantics.h5")
94 |         self.download_links["semantics_vids"] = aws_url(filename,
95 |                                                         "semantics.avi")
96 | 
97 | 
98 | if __name__ == "__main__":
99 |     valid_data = ["data", "data_videos", "depth_gt", "pose_gt",
100 |                   "gt_vids", "semantics", "semantics_vids",
101 |                   "global_pcd", "raw_data"]
102 |     import argparse
103 |     parser = argparse.ArgumentParser()
104 |     parser.add_argument("--vehicle", required=False,
105 |                         help="Type of vehicle to download: car, " +
106 |                              "falcon, spot. If not provided, all " +
107 |                              "vehicles will be downloaded")
108 |     parser.add_argument("--environment", required=False,
109 |                         help="Type of environment to download: urban, " +
110 |                              "indoor, forest, outdoor. If not provided, " +
111 |                              "all environments will be downloaded")
112 |     parser.add_argument("--to_download", required=False,
113 |                         help=f"Data to download: {', '.join(valid_data)}")
114 |     parser.add_argument("--train_test", required=False,
115 |                         help="Train or test data to download: train, test. " +
116 |                              "If not provided, both train and " +
117 |                              "test will be downloaded")
118 |     parser.add_argument("--yaml", required=False,
119 |                         help="Path to dataset_lists.yaml. 
" + 120 | "If not provided, it will be downloaded " + 121 | "from the repository") 122 | parser.add_argument("--output_dir", required=False, default="output", 123 | help="Output directory to download the data") 124 | parser.add_argument("--no_download", action="store_true", 125 | help="Do not download the data, " + 126 | "just print the list of files") 127 | args = parser.parse_args() 128 | 129 | # Check arguments 130 | if args.output_dir is None: 131 | output_dir = "output" 132 | else: 133 | output_dir = args.output_dir 134 | 135 | if args.vehicle is None: 136 | vehicles = ["car", "falcon", "spot"] 137 | elif args.vehicle in ["car", "falcon", "spot"]: 138 | vehicles = [args.vehicle] 139 | else: 140 | raise ValueError("Invalid vehicle type") 141 | 142 | if args.environment is None: 143 | environments = ["urban", "indoor", "forest", "outdoor"] 144 | elif args.environment in ["urban", "indoor", "forest", "outdoor"]: 145 | environments = [args.environment] 146 | else: 147 | raise ValueError("Invalid environment type") 148 | 149 | if args.to_download is None: 150 | to_download = valid_data 151 | elif args.to_download in valid_data: 152 | to_download = [args.to_download] 153 | else: 154 | raise ValueError("Invalid data type") 155 | 156 | print("====================") 157 | print("M3ED Download Script") 158 | print("====================") 159 | print(f"Data to download: {', '.join(to_download)}") 160 | print(f"Vehicle: {', '.join(vehicles)}") 161 | print(f"Environment: {', '.join(environments)}") 162 | print(f"Output directory: {output_dir}") 163 | 164 | # Ask user to continue 165 | if input("Continue? (y/n)") != "y": 166 | sys.exit() 167 | 168 | # Check if output directory exists or create it 169 | if not os.path.exists(output_dir): 170 | os.makedirs(output_dir) 171 | 172 | # if yaml file is not provided or it does not exist, download it 173 | if args.yaml is None: 174 | print("Downloading dataset_lists.yaml from the repository") 175 | with open("dataset_lists.yaml", "wb") as f: 176 | f.write(requests.get(DATASETS_LIST_URL).content) 177 | args.yaml = "dataset_lists.yaml" 178 | 179 | # load yaml file 180 | with open(args.yaml, "r") as f: 181 | dataset_list = yaml.safe_load(f) 182 | 183 | # Parse yaml file and instantiate all files 184 | download_files = {} 185 | for file in dataset_list: 186 | filename = file["file"] 187 | filetype = file["filetype"] 188 | # We do not support downloading calib files yet 189 | if "calib" in filetype: 190 | continue 191 | 192 | # Check if file is in the requested vehicle and environment 193 | check_vehicle = False 194 | for vehicle in vehicles: 195 | if vehicle in filename: 196 | check_vehicle = True 197 | break 198 | if not check_vehicle: 199 | continue 200 | check_environment = False 201 | for environment in environments: 202 | if environment in filename: 203 | check_environment = True 204 | break 205 | if not check_environment: 206 | continue 207 | 208 | is_test_file = file["is_test_file"] 209 | if is_test_file and args.train_test == "test": 210 | download_files[filename] = M3ED_Data_File(filename, file) 211 | elif not is_test_file and args.train_test == "train": 212 | download_files[filename] = M3ED_Data_File(filename, file) 213 | else: 214 | download_files[filename] = M3ED_Data_File(filename, file) 215 | 216 | # Download all the files in the list 217 | for filename, file in download_files.items(): 218 | if args.no_download: 219 | # Print without new line 220 | print(f"{filename}", end=', ') 221 | 222 | if args.no_download: 223 | print("") 224 | 225 | # Check 
that the list of files is not empty
226 |     if len(download_files) == 0:
227 |         sys.exit("No files to download for the given environment and vehicle")
228 | 
229 |     # Check whether any of the selected files has something to download
230 |     empty = True
231 |     for filename, file in download_files.items():
232 |         if not file.check_download(to_download):
233 |             empty = False
234 |             break
235 |     if empty:
236 |         sys.exit("No files to download with the required filters")
237 | 
238 |     # Download all the files
239 |     if not args.no_download:
240 |         for filename, file in download_files.items():
241 |             file.download(output_dir, to_download)
242 | 
--------------------------------------------------------------------------------
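Example invocations of tools/download_m3ed.py, based on the argparse options
defined above (the filter values are illustrative):

    # List the matching files without downloading anything
    python3 tools/download_m3ed.py --vehicle spot --environment forest --no_download

    # Download only the processed data H5 files for the car/urban sequences
    python3 tools/download_m3ed.py --vehicle car --environment urban --to_download data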