├── core ├── utils │ ├── __init__.py │ ├── common.py │ ├── color.py │ ├── quaternion.py │ └── radardsp.py ├── __init__.py ├── config.py ├── dataset.py ├── transform.py ├── lidar.py ├── record.py ├── calibration.py └── radar.py ├── __init__.py ├── .gitignore ├── requirements.txt ├── LICENSE ├── dataset └── dataset.json ├── README.md └── coloradar.py /core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """Coloradar Dataset.""" 2 | -------------------------------------------------------------------------------- /core/__init__.py: -------------------------------------------------------------------------------- 1 | """Core. 2 | 3 | Collection of utilities for processing the Coloradar dataset 4 | """ 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # cache files 2 | *cache* 3 | 4 | # Virtual environment 5 | ?venv* 6 | 7 | # Dataset folder expect the dataset entry point 8 | dataset/* 9 | !dataset/*.json 10 | 11 | # Output folder for generated artifacts (images, etc.) 12 | output 13 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | contourpy==1.2.1 2 | cycler==0.12.1 3 | fonttools==4.53.0 4 | kiwisolver==1.4.5 5 | matplotlib==3.9.1 6 | numpy==2.0.0 7 | opencv-python==4.10.0.84 8 | packaging==24.1 9 | pillow==10.4.0 10 | pyparsing==3.1.2 11 | python-dateutil==2.9.0.post0 12 | six==1.16.0 13 | tornado==6.4.1 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 AMOUSSOU Zinsou Kenneth 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /core/utils/common.py: -------------------------------------------------------------------------------- 1 | """Common. 2 | 3 | Collection of general usage functions 4 | """ 5 | from .color import Color 6 | 7 | 8 | def error(msg: str) -> None: 9 | """Print an error message. 
10 | 11 | The message would be printed in red in the terminal 12 | 13 | Argument: 14 | msg: Error message to print 15 | """ 16 | print(Color.RED, "[ERROR] " + msg, Color.RESET) 17 | 18 | def warning(msg: str) -> None: 19 | """Print a warning message. 20 | 21 | The message would be printed in yellow in the terminal 22 | 23 | Argument: 24 | msg: Error message to print 25 | """ 26 | print(Color.YELLOW, "[WARNING] " + msg, Color.RESET) 27 | 28 | def info(msg: str) -> None: 29 | """Print a warning message. 30 | 31 | The message would be printed in yellow in the terminal 32 | 33 | Argument: 34 | msg: Error message to print 35 | """ 36 | print(Color.RESET, "[INFO] " + msg) 37 | 38 | def success(msg: str) -> None: 39 | """Print a success message. 40 | 41 | The message would be printed in green in the terminal 42 | 43 | Argument: 44 | msg: Error message to print 45 | """ 46 | print(Color.GREEN, "[SUCCESS] " + msg, Color.RESET) 47 | -------------------------------------------------------------------------------- /core/utils/color.py: -------------------------------------------------------------------------------- 1 | """Color. 2 | 3 | Color codes characters that can be used in the terminal 4 | """ 5 | 6 | 7 | class Color: 8 | """Terminal color""" 9 | 10 | RESET = '\33[0m' 11 | BOLD = '\33[1m' 12 | ITALIC = '\33[3m' 13 | URL = '\33[4m' 14 | BLINK = '\33[5m' 15 | BLINK2 = '\33[6m' 16 | SELECTED = '\33[7m' 17 | 18 | BLACK = '\33[30m' 19 | RED = '\33[31m' 20 | GREEN = '\33[32m' 21 | YELLOW = '\33[33m' 22 | BLUE = '\33[34m' 23 | VIOLET = '\33[35m' 24 | BEIGE = '\33[36m' 25 | WHITE = '\33[37m' 26 | 27 | BLACKBG = '\33[40m' 28 | REDBG = '\33[41m' 29 | GREENBG = '\33[42m' 30 | YELLOWBG = '\33[43m' 31 | BLUEBG = '\33[44m' 32 | VIOLETBG = '\33[45m' 33 | BEIGEBG = '\33[46m' 34 | WHITEBG = '\33[47m' 35 | 36 | GREY = '\33[90m' 37 | RED2 = '\33[91m' 38 | GREEN2 = '\33[92m' 39 | YELLOW2 = '\33[93m' 40 | BLUE2 = '\33[94m' 41 | VIOLET2 = '\33[95m' 42 | BEIGE2 = '\33[96m' 43 | WHITE2 = '\33[97m' 44 | 45 | GREYBG = '\33[100m' 46 | REDBG2 = '\33[101m' 47 | GREENBG2 = '\33[102m' 48 | YELLOWBG2 = '\33[103m' 49 | BLUEBG2 = '\33[104m' 50 | VIOLETBG2 = '\33[105m' 51 | BEIGEBG2 = '\33[106m' 52 | WHITEBG2 = '\33[107m' 53 | -------------------------------------------------------------------------------- /core/config.py: -------------------------------------------------------------------------------- 1 | """Configuration.""" 2 | 3 | ROOTDIR: str = "dataset" 4 | 5 | # Entry point of the dataset 6 | DATASET: str = ROOTDIR + "/dataset.json" 7 | 8 | 9 | # Minimum number of szimuth bins 10 | NUMBER_AZIMUTH_BINS_MIN: int = 32 11 | 12 | # Minimum number of elevation bins 13 | NUMBER_ELEVATION_BINS_MIN: int = 64 14 | 15 | # Minimum number of doppler bins 16 | NUMBER_DOPPLER_BINS_MIN: int = 16 17 | 18 | # Minimum number of range bins 19 | NUMBER_RANGE_BINS_MIN: int = 128 20 | 21 | 22 | # DoA estimation methods 23 | # values: "fft", "esprit" 24 | DOA_METHOD: str = "esprit" 25 | 26 | # Radar Digital Signal Processing Method 27 | # values: "normal", "fesprit", "tdp" 28 | # - "normal" stands for chain of Range-Doppler FFT, CFAR and DOA 29 | # - "fesprit" also computes the Range-Doppler FFT but uses ESPRIT for 30 | # a precise frequency estimation for Range, Doppler and DOA. 
No need 31 | # for CFAR 32 | # - "tdp" stands for time domain processing 33 | # NOTE: "fesprit" is still being tested and optimized 34 | # 35 | RDSP_METHOD: str = "normal" 36 | 37 | 38 | # 2D Range-Doppler OS-CFAR Parameters used for generating 39 | # radar pointclouds 40 | RD_OS_CFAR_WS: int = 8 # Window size 41 | RD_OS_CFAR_GS: int = 1 # Guard cell 42 | RD_OS_CFAR_K: float = 0.75 # n'th quantile 43 | RD_OS_CFAR_TOS: int = 8 # Tos factor 44 | -------------------------------------------------------------------------------- /core/dataset.py: -------------------------------------------------------------------------------- 1 | """Dataset. 2 | 3 | Root module loading the dataset 4 | """ 5 | import json 6 | 7 | from core.config import ROOTDIR, DATASET 8 | from core.calibration import Calibration 9 | from core.record import Record 10 | 11 | 12 | class Coloradar: 13 | """Coloradar dataset loader. 14 | 15 | Attributes: 16 | index: JSON file describing the dataset 17 | rootdir: Root directory containing the dataset 18 | calibration: Object holding sensor calibration details 19 | """ 20 | 21 | def __init__(self, rootdir: str = ROOTDIR) -> None: 22 | """Load dataset paths. 23 | 24 | Argument: 25 | rootdir: Root directory marking the entry point of the dataset 26 | """ 27 | self.index: str = DATASET 28 | self.rootdir: str = rootdir 29 | self.config: dict = {} 30 | with open(DATASET, "r") as fh: 31 | self.config = json.load(fh) 32 | self.calibration = Calibration(self.config["calibration"]) 33 | 34 | def getRecord(self, codename: str, index: int) -> Record: 35 | """Read a sensor recording frame from the dataset.""" 36 | return Record( 37 | self.config["datastore"], self.calibration, codename, index) 38 | 39 | def printCodenames(self) -> None: 40 | """Print the available sub-dataset and their codename.""" 41 | print(" Coloradar dataset") 42 | print(f" {'[CODENAME]':30} [DIRECTORY NAME]") 43 | for dataset in self.config["datastore"]["folders"]: 44 | print(f" {dataset['codename']:30} {dataset['path']}") 45 | print() 46 | 47 | def __str__(self) -> str: 48 | return json.dumps(self.config, indent=2) 49 | -------------------------------------------------------------------------------- /core/utils/quaternion.py: -------------------------------------------------------------------------------- 1 | """Quaternion. 2 | 3 | Collection of function for manipulating quaternions 4 | """ 5 | import numpy as np 6 | 7 | 8 | def quaternion_to_rotation_matrix(q: np.array) -> np.matrix: 9 | """Convert a quaternion into a rotation matrix. 10 | 11 | r00 r01 r02 12 | Rt = r10 r11 r12 13 | r20 r21 r22 14 | """ 15 | q1, q2, q3, q0 = q 16 | 17 | # 1rst row of the rotation matrix 18 | r00 = 2 * (q0 * q0 + q1 * q1) - 1 19 | r01 = 2 * (q1 * q2 - q0 * q3) 20 | r02 = 2 * (q1 * q3 + q0 * q2) 21 | 22 | # 2nd row of the rotation matrix 23 | r10 = 2 * (q1 * q2 + q0 * q3) 24 | r11 = 2 * (q0 * q0 + q2 * q2) - 1 25 | r12 = 2 * (q2 * q3 - q0 * q1) 26 | 27 | # 3rd row of the rotation matrix 28 | r20 = 2 * (q1 * q3 - q0 * q2) 29 | r21 = 2 * (q2 * q3 + q0 * q1) 30 | r22 = 2 * (q0 * q0 + q3 * q3) - 1 31 | 32 | return np.matrix([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]]) 33 | 34 | # Alias name of the function above 35 | q2r = quaternion_to_rotation_matrix 36 | 37 | 38 | def quaternion_to_extended_rotation_matrix(q: np.array, t: np.array) -> np.matrix: 39 | """Quaternion and translation vector to rotation matrix. 40 | 41 | Convert a quaternion and a translation matrix into an extended rotation matrix. 
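# Sketch: a quick sanity check of the quaternion helpers defined in this
# module. The functions unpack the quaternion as (x, y, z, w), i.e. with the
# scalar part last (see the unpacking at the top of
# quaternion_to_rotation_matrix). Run from the repository root so that `core`
# is importable; the rotation and translation values are sample inputs.
import numpy as np

from core.utils.quaternion import q2r, q2er

# Quaternion for a 90-degree rotation about the z-axis, scalar part last
angle = np.pi / 2
q = np.array([0.0, 0.0, np.sin(angle / 2), np.cos(angle / 2)])

r = q2r(q)
# The rotation must map the x-axis onto the y-axis
assert np.allclose(r @ np.array([[1.0], [0.0], [0.0]]), [[0.0], [1.0], [0.0]])

# Extended 4x4 matrix: rotation plus a translation of (1, 2, 3)
rt = q2er(q, np.array([1.0, 2.0, 3.0]))
point = np.array([[1.0], [0.0], [0.0], [1.0]])   # homogeneous coordinates
print(rt @ point)                                # ~ [[1.], [3.], [3.], [1.]]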
42 | 43 | r00 r01 r02 t0 44 | Rt = r10 r11 r12 t1 45 | r20 r21 r22 t2 46 | 0 0 0 1 47 | """ 48 | r = quaternion_to_rotation_matrix(q) 49 | r = np.column_stack((r, t)) 50 | return np.row_stack((r, [0., 0., 0., 1.0])) 51 | 52 | # Alias name for the function above 53 | q2er = quaternion_to_extended_rotation_matrix 54 | -------------------------------------------------------------------------------- /core/transform.py: -------------------------------------------------------------------------------- 1 | """Tranform. 2 | 3 | Handle the projection of data from the base coordinate system to 4 | a sensor-specific coordinate system. 5 | """ 6 | 7 | import os 8 | import numpy as np 9 | 10 | from core.config import ROOTDIR 11 | from .utils.quaternion import q2r, q2er 12 | 13 | 14 | class Transform: 15 | """Transform. 16 | 17 | Attributes: 18 | t: Translation vector 19 | q: Rotation quaternion 20 | r: Rotation matrix 21 | rt: Extended rotation matrix 22 | The extended rotation matrix contain both rotation matrix and 23 | translation vecteor 24 | """ 25 | 26 | def __init__(self, filepath: str) -> None: 27 | """Tranform constructor. 28 | 29 | Argument: 30 | filepath: Path to access the transform file 31 | """ 32 | 33 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 34 | line_number: int = 0 35 | for line in fh: 36 | line_number += 1 37 | if line_number == 1: 38 | self.t = np.array([float(x) for x in line.split(" ")]) 39 | else: 40 | self.q = np.array([float(x) for x in line.split(" ")]) 41 | self.r = q2r(self.q) 42 | self.rt = q2er(self.q, self.t) 43 | 44 | 45 | class BaseToCCRadar(Transform): 46 | """Base to Cascade Chip Radar tranform.""" 47 | 48 | pass 49 | 50 | 51 | class BaseToSCRadar(Transform): 52 | """Base to Single Chip Radar tranform.""" 53 | 54 | pass 55 | 56 | 57 | class BaseToLidar(Transform): 58 | """Base to Lidar tranform.""" 59 | 60 | pass 61 | 62 | 63 | class BaseToVicon(Transform): 64 | """Base to Vicon tranform.""" 65 | 66 | pass 67 | 68 | 69 | class BaseToImu(Transform): 70 | """Base to IMU tranform.""" 71 | 72 | pass 73 | -------------------------------------------------------------------------------- /dataset/dataset.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.01", 3 | "description": { 4 | "ccradar": "Cascade Chips Radar", 5 | "scradar": "Single Chip Radar" 6 | }, 7 | "calibration": { 8 | "ccradar": { 9 | "antenna": "calib/cascade/antenna_cfg.txt", 10 | "coupling": "calib/cascade/coupling_calib.txt", 11 | "heatmap": "calib/cascade/heatmap_cfg.txt", 12 | "phase": "calib/cascade/phase_frequency_calib.txt", 13 | "waveform": "calib/cascade/waveform_cfg.txt" 14 | }, 15 | "scradar": { 16 | "antenna": "calib/single_chip/antenna_cfg.txt", 17 | "coupling": "calib/single_chip/coupling_calib.txt", 18 | "heatmap": "calib/single_chip/heatmap_cfg.txt", 19 | "waveform": "calib/single_chip/waveform_cfg.txt" 20 | }, 21 | "transform": { 22 | "base-to-ccradar": "calib/transforms/base_to_cascade.txt", 23 | "base-to-scradar": "calib/transforms/base_to_single_chip.txt", 24 | "base-to-imu": "calib/transforms/base_to_imu.txt", 25 | "base-to-lidar": "calib/transforms/base_to_lidar.txt", 26 | "base-to-vicon": "calib/transforms/base_to_vicon.txt" 27 | } 28 | }, 29 | "datastore": { 30 | "folders": [ 31 | { 32 | "codename": "hallway0", 33 | "path": "12_21_2020_ec_hallways_run0" 34 | }, 35 | { 36 | "codename": "hallway4", 37 | "path": "12_21_2020_ec_hallways_run4" 38 | }, 39 | { 40 | "codename": "hallway1", 41 | "path": 
"12_21_2020_ec_hallways_run1" 42 | }, 43 | { 44 | "codename": "outdoor0", 45 | "path": "2_28_2021_outdoors_run0" 46 | }, 47 | { 48 | "codename": "aspen0", 49 | "path": "2_24_2021_aspen_run0" 50 | }, 51 | { 52 | "codename": "aspen9", 53 | "path": "2_24_2021_aspen_run9" 54 | }, 55 | { 56 | "codename": "aspen11", 57 | "path": "2_24_2021_aspen_run11" 58 | }, 59 | { 60 | "codename": "edgar5", 61 | "path": "2_23_2021_edgar_army_run5" 62 | }, 63 | { 64 | "codename": "longboard1", 65 | "path": "2_22_2021_longboard_run1" 66 | } 67 | ], 68 | "paths": { 69 | "ccradar": { 70 | "raw": { 71 | "timestamp": "cascade/adc_samples/timestamps.txt", 72 | "data": "cascade/adc_samples/data", 73 | "filename_prefix": "frame_" 74 | }, 75 | "heatmap": { 76 | "timestamp": "cascade/heatmaps/timestamps.txt", 77 | "data": "cascade/heatmaps/data", 78 | "filename_prefix": "heatmap_" 79 | }, 80 | "pointcloud": { 81 | "timestamp": "", 82 | "data": "cascade/pointclouds", 83 | "filename_prefix": "radar_pcl" 84 | } 85 | }, 86 | "scradar": { 87 | "raw": { 88 | "timestamp": "single_chip/adc_samples/timestamps.txt", 89 | "data": "single_chip/adc_samples/data", 90 | "filename_prefix": "frame_" 91 | }, 92 | "heatmap": { 93 | "timestamp": "single_chip/heatmaps/timestamps.txt", 94 | "data": "single_chip/heatmaps/data", 95 | "filename_prefix": "heatmap_" 96 | }, 97 | "pointcloud": { 98 | "timestamp": "single_chip/pointclouds/timestamps.txt", 99 | "data": "single_chip/pointclouds/data", 100 | "filename_prefix": "radar_pointcloud_" 101 | } 102 | }, 103 | "lidar": { 104 | "timestamp": "lidar/timestamps.txt", 105 | "data": "lidar/pointclouds", 106 | "filename_prefix": "lidar_pointcloud_" 107 | }, 108 | "imu": { 109 | "timestamp": "imu/timestamps.txt", 110 | "data": "imu/imu_data.txt" 111 | }, 112 | "groundtruth": { 113 | "timestamp": "groundtruth/timestamps.txt", 114 | "data": "groundtruth/groundtruth_poses.txt" 115 | } 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /core/lidar.py: -------------------------------------------------------------------------------- 1 | """Lidar.""" 2 | import sys 3 | import os 4 | from typing import Optional 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | from .utils.common import error, warning 10 | 11 | 12 | class Lidar(object): 13 | """Lidar. 14 | 15 | Attributes: 16 | filepah: Path of the lidar recording 17 | cld: Lidar pointcloud 18 | """ 19 | 20 | # The recorded attributes are: 21 | # x, y, z, I (Intensity of the reflections) 22 | NUMBER_RECORDING_ATTRIBUTES: int = 4 23 | 24 | def __init__(self, config: dict[str, str], calib, index: int) -> None: 25 | """Init. 
26 | 27 | Argument: 28 | config (dict): Paths to access the dataset 29 | calib (Calibration): Calibration object (See calibration.py) 30 | index (int): Index of the lidar record to load 31 | """ 32 | self.calibration = calib 33 | filename: str = self._filename( 34 | config["paths"]["lidar"]["filename_prefix"], 35 | index, 36 | "bin" 37 | ) 38 | self.filepath = os.path.join( 39 | config["paths"]["rootdir"], 40 | config["paths"]["lidar"]["data"], 41 | filename 42 | ) 43 | try: 44 | cld = np.fromfile(self.filepath, np.float32) 45 | self.cld = np.reshape(cld, (-1, self.NUMBER_RECORDING_ATTRIBUTES)) 46 | except FileNotFoundError: 47 | error(f"File '{self.filepath}' not found.") 48 | sys.exit(1) 49 | 50 | def show(self, render: bool = True) -> None: 51 | """Render the lidar pointcloud.""" 52 | if self.cld is None: 53 | warning("No pointcloud found!") 54 | return None 55 | ax = plt.axes(projection="3d") 56 | ax.set_title("Lidar pointcloud") 57 | ax.set_xlabel("x") 58 | ax.set_ylabel("y") 59 | ax.set_zlabel("z") 60 | plot = ax.scatter( 61 | self.cld[:, 0], 62 | self.cld[:, 1], 63 | self.cld[:, 2], 64 | c=self.cld[:, 3], 65 | cmap=plt.cm.get_cmap(), 66 | ) 67 | plt.colorbar(plot) 68 | if render: 69 | plt.show() 70 | 71 | def getPointlcoud(self) -> np.matrix: 72 | """Return the lidar pointcloud.""" 73 | return np.matrix(self.cld) 74 | 75 | def getPointCloud(self, **kwargs) -> np.array: 76 | """Get the x, y, z coordinate from the lidar dataset. 77 | Return 78 | Format: 79 | X001 Y001 Z001 80 | X002 Y002 Z002 81 | X003 Y003 Z003 82 | ... 83 | Shape: (n, 3) 84 | """ 85 | return self.cld[:, 0:3] 86 | 87 | def getPointCloudSample(self, 88 | x_filter: Optional[tuple[float, float]] = None, 89 | y_filter: Optional[tuple[float, float]] = None, 90 | z_filter: Optional[tuple[float, float]] = None, 91 | full: bool = False) -> np.array: 92 | """Extract a part of the pointcloud based on the filters. 93 | 94 | Arguments: 95 | x_filter: tuple containing the min and max values defining 96 | the x-axis boundary 97 | y_filter: tuple containing the min and max values defining 98 | the y-axis boundary 99 | z_filter: tuple containing the min and max values defining 100 | the z-axis boundary 101 | If z_filter is "None", the pointcloud is not filtered 102 | in regard of the z-axis. 103 | full: Boolean flag. When it's "True" all the entries of the pointcould 104 | are returned. If not, only the (x, y, z) columns of the dataset 105 | are returned. 106 | Note: Each filtering tuple is compose of min value and max value (in 107 | that order) 108 | """ 109 | if full: 110 | pointcloud = self.cld 111 | else: 112 | pointcloud = self.getPointCloud() 113 | filtering_mask = True 114 | if x_filter: 115 | x_min, x_max = x_filter 116 | filtering_mask = filtering_mask & (pointcloud[:, 0] >= x_min) 117 | filtering_mask = filtering_mask & (pointcloud[:, 0] <= x_max) 118 | if y_filter: 119 | y_min, y_max = y_filter 120 | filtering_mask = filtering_mask & (pointcloud[:, 1] >= y_min) 121 | filtering_mask = filtering_mask & (pointcloud[:, 1] <= y_max) 122 | if z_filter: 123 | z_min, z_max = z_filter 124 | filtering_mask = filtering_mask & (pointcloud[:, 2] >= z_min) 125 | filtering_mask = filtering_mask & (pointcloud[:, 2] <= z_max) 126 | return pointcloud[filtering_mask] 127 | 128 | 129 | def getBirdEyeView(self, resolution: float, 130 | srange: tuple[float, float], # side range 131 | frange: tuple[float, float], # forward range 132 | ) -> None: 133 | """Generate the bird eye view of the pointcloud. 
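# Sketch: cropping the lidar pointcloud with Lidar.getPointCloudSample, keeping
# a 20 m x 20 m box around the sensor and only points near sensor height. The
# "hallway0" codename and index 130 are sample values and assume the subset has
# been downloaded under `dataset/` as described in the README.
from core.dataset import Coloradar

record = Coloradar().getRecord("hallway0", 130)
record.load("lidar")
sample = record.lidar.getPointCloudSample(
    x_filter=(-10.0, 10.0),
    y_filter=(-10.0, 10.0),
    z_filter=(-0.5, 2.0),
)
print(sample.shape)   # (n, 3): x, y, z columns only, since `full` defaults to False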
134 | Arguments: 135 | resoluton: The pixel resolution of the image to generate 136 | srange: Side range to cover. 137 | Format: (srange_min, srange_max) 138 | frange: Forward range to cover. 139 | Format (frange_min, frange_max) 140 | Note: The ranges are expected to be provided the minimum first and then 141 | the maxnimum 142 | ----------------- <-- frange_max 143 | | | 144 | | ^ | 145 | | | | 146 | | <-- 0 | 147 | | | 148 | | | 149 | ----------------- <-- frange_min 150 | ^ ^ 151 | srange_min srange_max 152 | """ 153 | pointcloud = self.getPointCloudSample(frange, srange, z_filter=None, full=True) 154 | x = pointcloud[:, 0] 155 | y = pointcloud[:, 1] 156 | z = pointcloud[:, 2] 157 | 158 | ximg = (-y / resolution).astype(np.int32) 159 | yimg = (-x / resolution).astype(np.int32) 160 | 161 | ximg -= int(np.floor(srange[0] / resolution)) 162 | yimg += int(np.floor(frange[1] / resolution)) 163 | 164 | # Prepare the three channels of the bird eye view image 165 | pixels = np.zeros((len(z), 3), dtype=np.uint8) 166 | 167 | # Encode distance 168 | norm = np.sqrt(x **2 + y **2 + z**2) 169 | pixels[:, 0] = (255.0 / (1.0 + np.exp(-norm))).astype(np.uint8) 170 | 171 | # Encode height information 172 | pixels[:, 1] = (255.0 / (1.0 + np.exp(-z))).astype(np.uint8) 173 | 174 | # Encode intensity (for lidar) and radial velosity (for radar) 175 | pixels[:, 2] = pointcloud[:, 3] 176 | # Scale pixels between 0 and 2555 177 | minv = min(pointcloud[:, 3]) 178 | maxv = max(pointcloud[:, 3]) 179 | pixels[:, 2] = ( 180 | ((pixels[:, 2] - minv) / np.abs(maxv - minv)) * 255 181 | ).astype(np.uint8) 182 | 183 | # Create the frame for the bird eye view 184 | # Note: the "+1" to estimate the width and height of the image is 185 | # to count for the (0, 0) position in the center of the pointcloud 186 | img_width: int = 1 + int((srange[1] - srange[0])/resolution) 187 | img_height: int = 1 + int((frange[1] - frange[0])/resolution) 188 | bev_img = np.zeros([img_height, img_width, 3], dtype=np.uint8) 189 | 190 | # Set the height information in the created image frame 191 | bev_img[yimg, ximg] = pixels 192 | return bev_img 193 | 194 | def _filename(self, basename: str, index: int, ext: str) -> str: 195 | """Filename builder. 196 | 197 | Generate a typical filename in the Coloradar dataset given its index 198 | 199 | Arguments: 200 | basename: Filename prefix 201 | index: Order number of the file in the dataset 202 | ext: Extension of the file 203 | Return: 204 | name of the file in the dataset 205 | """ 206 | return basename + str(index) + "." + ext 207 | -------------------------------------------------------------------------------- /core/record.py: -------------------------------------------------------------------------------- 1 | """Record. 2 | 3 | A record is a collection of measurement from different sensors 4 | (lidar, radar, imu, etc.) 5 | """ 6 | from glob import glob 7 | import sys 8 | import os 9 | import multiprocessing 10 | 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | import cv2 as cv 14 | 15 | from core.config import ROOTDIR, DATASET 16 | from core.lidar import Lidar 17 | from core.radar import SCRadar, CCRadar 18 | 19 | from .utils.common import error 20 | 21 | 22 | class Record: 23 | """Record. 
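# Sketch: rendering a bird eye view with the getBirdEyeView method above,
# covering 15 m on each side of the sensor at 5 cm per pixel. The codename and
# frame index are sample values; the subset must be available under `dataset/`.
import matplotlib.pyplot as plt

from core.dataset import Coloradar

record = Coloradar().getRecord("hallway0", 175)
record.load("lidar")
bev = record.lidar.getBirdEyeView(
    resolution=0.05,          # meters per pixel
    srange=(-15.0, 15.0),     # side (y) range: (min, max)
    frange=(-15.0, 15.0),     # forward (x) range: (min, max)
)
plt.imshow(bev)
plt.show()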
24 | 25 | Class describing records in the dataset 26 | 27 | Attributes: 28 | calibration: Calibration parameters 29 | lidar: Velodyne samples of the dataset record 30 | """ 31 | 32 | def __init__(self, descriptor: dict[str, dict[str, str]], 33 | calibration, codename: str, index: int) -> None: 34 | """Init. 35 | 36 | Arguments: 37 | descriptor: Holds the paths that describe the dataset 38 | calibration: Calibration object 39 | Provide all the calibration parameters of all sensors 40 | codename: Subdirectory codename 41 | index: Order number indicating the entry of the dataset in interest 42 | """ 43 | self.calibration = calibration 44 | self.descriptor = descriptor 45 | self.index = index 46 | self.codename: str = codename 47 | subdir: str = "" 48 | for dataset in descriptor["folders"]: 49 | if dataset["codename"] == codename: 50 | subdir = dataset["path"] 51 | break 52 | if not subdir: 53 | error(f"Dataset codename '{codename}' not defined in '{DATASET}") 54 | sys.exit(1) 55 | self.descriptor["paths"]["rootdir"] = os.path.join(ROOTDIR, subdir) 56 | self.lidar = None 57 | self.scradar = None 58 | self.ccradar = None 59 | 60 | def load(self, sensor: str) -> None: 61 | """Load the data file for a given sensor. 62 | 63 | Arguments: 64 | sensor: The sensor considered so that only the data of that sensor 65 | would be loaded. 66 | Possible Values: lidar, scradar, ccradar 67 | """ 68 | 69 | if sensor == "lidar": 70 | self.lidar = Lidar(self.descriptor, self.calibration, self.index) 71 | elif sensor == "scradar": 72 | self.scradar = SCRadar(self.descriptor, self.calibration, self.index) 73 | elif sensor == "ccradar": 74 | self.ccradar = CCRadar(self.descriptor, self.calibration, self.index) 75 | 76 | def process_and_save(self, sensor: str, **kwargs) -> None: 77 | """Process and save the result into an output folder. 78 | 79 | Arguments: 80 | sensor (str): Name of sensor of interest 81 | Values: "lidar", "scradar", "ccradar" 82 | kwargs (dict): Keyword argument 83 | "threshold": Threshold value to be used for rendering 84 | radar heatmap 85 | "no_sidelobe": Ignore closest recording in each frame 86 | "velocity_view": Enable the rendering of radial velocity 87 | as fourth dimention 88 | "heatmap_3d": Save 3D heatmap when true. Otherwise, a 89 | 2D heatmap is generated 90 | "save_as": Save arrays as `.csv` or `.bin` files 91 | """ 92 | # Dot per inch 93 | self._dpi: int = 400 94 | self._kwargs = kwargs 95 | self._sensor = sensor 96 | 97 | # Output directory path 98 | output_dir: str = kwargs.get("output", "output") 99 | output_dir = f"{output_dir}/{self.codename}/{sensor}" 100 | os.makedirs(output_dir, exist_ok=True) 101 | self._output_dir = output_dir 102 | cpu_count: int = multiprocessing.cpu_count() 103 | print(f"Please wait! 
Processing on {cpu_count} CPU(s)") 104 | 105 | if sensor == "lidar": 106 | dataset_path: str = os.path.join( 107 | self.descriptor["paths"]["rootdir"], 108 | self.descriptor["paths"][sensor]["data"] 109 | ) 110 | nb_files: int = len(os.listdir(dataset_path)) - 1 111 | with multiprocessing.Pool(cpu_count) as pool: 112 | pool.map( 113 | self._process_lidar, 114 | range(1, nb_files + 1), 115 | chunksize=10 116 | ) 117 | elif (sensor == "ccradar") or (sensor == "scradar"): 118 | dataset_path: str = os.path.join( 119 | self.descriptor["paths"]["rootdir"], 120 | self.descriptor["paths"][sensor]["raw"]["data"] 121 | ) 122 | nb_files: int = len(os.listdir(dataset_path)) - 1 123 | with multiprocessing.Pool(cpu_count) as pool: 124 | pool.map( 125 | self._process_radar, 126 | range(1, nb_files + 1), 127 | chunksize=10 128 | ) 129 | 130 | def _process_radar(self, idx: int) -> int: 131 | """Handler of radar data processing. 132 | 133 | Used as the handler for parallel processing. The context attributes 134 | needed by this method are only defined in the method `process_and_save` 135 | As so, only that method is supposed to call this one. 136 | 137 | NOTE: THIS METHOD IS NOT EXPECTED TO BE CALLED FROM OUTSIDE OF THIS 138 | CLASS 139 | 140 | Argument: 141 | idx: Index of the file to process 142 | """ 143 | self.index = idx 144 | self.load(self._sensor) 145 | radar: SCRadar | CCRadar = self.scradar 146 | if self.ccradar: 147 | radar = self.ccradar 148 | SIZE: int = 20 # inch 149 | plt.figure(1, clear=True, dpi=self._dpi, figsize=(SIZE, SIZE)) 150 | if self._kwargs.get("heatmap_3d") == False: 151 | radar.show2dHeatmap(False, False) 152 | elif self._kwargs.get("heatmap_3d"): 153 | radar.showHeatmapFromRaw( 154 | self._kwargs.get("threshold"), 155 | self._kwargs.get("no_sidelobe"), 156 | self._kwargs.get("velocity_view"), 157 | self._kwargs.get("polar"), 158 | show=False, 159 | ) 160 | elif self._kwargs.get("pointcloud"): 161 | if self._kwargs.get("save_as") == "csv": 162 | pcl = radar.getPointcloudFromRaw( 163 | polar=self._kwargs.get("polar") 164 | ) 165 | np.savetxt( 166 | f"{self._output_dir}/radar_pcl{idx}.csv", 167 | pcl.astype(np.float32), 168 | delimiter=",", 169 | header="Azimuth (m), " 170 | "Range (m), " 171 | "Elevation (m), " 172 | "Velocity (m/s), " 173 | "Intensity or SNR (dB)" 174 | ) 175 | return idx 176 | elif self._kwargs.get("save_as") == "bin": 177 | pcl = radar.getPointcloudFromRaw( 178 | polar=self._kwargs.get("polar")) 179 | pcl.astype(np.float32).tofile( 180 | f"{self._output_dir}/radar_pcl{idx}.bin") 181 | return idx 182 | radar.showPointcloudFromRaw( 183 | self._kwargs.get("velocity_view"), 184 | self._kwargs.get("bird_eye_view"), 185 | self._kwargs.get("polar"), 186 | show=False, 187 | ) 188 | plt.savefig(f"{self._output_dir}/radar_{idx:04}.jpg", dpi=self._dpi) 189 | return idx 190 | 191 | def _process_lidar(self, idx: int) -> int: 192 | """Handler of lidar data processing. 193 | 194 | Used as the handler for parallel processing. The context attributes 195 | needed by this method are only defined in the method `process_and_save` 196 | As so, only that method is supposed to call this one. 
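# Sketch: batch-processing a whole subset with Record.process_and_save and
# turning the generated images into a video, mirroring what
# `python coloradar.py --dataset outdoor0 --lidar -bev --save-to output` does.
# The codename is a sample value; the index passed to getRecord is only a
# placeholder, since every frame of the subset gets processed.
from core.dataset import Coloradar

record = Coloradar().getRecord("outdoor0", 0)
record.process_and_save(
    "lidar",
    resolution=0.05,
    srange=(-15.0, 15.0),
    frange=(-15.0, 15.0),
    output="output",
)
# The images are written to output/outdoor0/lidar; assemble them into an .avi
record.make_video("output/outdoor0/lidar", ext="jpg")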
197 | 198 | NOTE: THIS METHOD IS NOT EXPECTED TO BE CALLED FROM OUTSIDE OF THIS 199 | CLASS 200 | 201 | Argument: 202 | idx: Index of the file to process 203 | """ 204 | self.index = idx 205 | self.load(self._sensor) 206 | bev = self.lidar.getBirdEyeView( 207 | self._kwargs.get("resolution", 0.05), 208 | self._kwargs.get("srange"), 209 | self._kwargs.get("frange"), 210 | ) 211 | plt.imsave(f"{self._output_dir}/lidar_bev_{idx:04}.jpg", bev) 212 | 213 | def make_video(self, inputdir: str, ext: str = "jpg") -> None: 214 | """Make video out of pictures""" 215 | files = glob(inputdir + f"/*.{ext}") 216 | files = sorted(files) 217 | height, width, _ = plt.imread(files[0]).shape 218 | fourcc = cv.VideoWriter_fourcc(*'MJPG') 219 | video = cv.VideoWriter(inputdir + f"/{self.codename}.avi", fourcc, 5, (width, height)) 220 | for idx, img in enumerate(files): 221 | print( 222 | f"[ ========= {100 * idx/len(files): 2.2f}% ========= ]\r", 223 | end="" 224 | ) 225 | video.write(cv.imread(img)) 226 | cv.destroyAllWindows() 227 | video.release() 228 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Coloradar 2 | 3 | The `coloradar` package makes it easy to access and render entries of the Coloradar dataset. 4 | From the dataset, only the Lidar, single-chip radar, and cascade radar are covered for now in the 5 | current version of the package. 6 | 7 | One of the interesting aspects of the Coloradar dataset is the availability of raw ADC samples of 8 | the radar sensors. Thus, it's possible to apply the desired radar signal processing techniques. 9 | 10 | With the current version of the `coloradar` package, the supported features as presented in the table below 11 | 12 | 13 | | Sensor | 3D PCL | 3d Heatmap | Bird Eye View | Heatmap (from raw ADC) | PCL (from raw ADC) | 14 | |-----------------------|:------:|:----------:|:-------------:|:----------------------:|:------------------:| 15 | | LiDaR | x | - | x | - | - | 16 | | Single chip radar | x | x | x | x | x | 17 | | Cascaded radar | - | x | x | x | x | 18 | 19 | [x] : Implemented 20 | 21 | [-]: N/A or Not Implemented 22 | 23 | PCL: Pointcloud 24 | 25 | For the radar sensors, the first two columns of the table above indicate post-processed data made available 26 | in the `coloradar` dataset. 27 | 28 | 29 | ## Environment setup 30 | 31 | Create a Python virtual environment and install the required packages as follows: 32 | 33 | ```bash 34 | python -m venv venv 35 | source ./venv/bin/activate 36 | 37 | python -m pip install -r requirements.txt 38 | ``` 39 | 40 | ## Coloradar dataset 41 | 42 | The coloradar dataset can be downloaded from [coloradar dataset](https://arpg.github.io/coloradar/). 43 | 44 | The dataset is composed of many subsets that can be downloaded separately. But the folder structure of 45 | each subset is the same. 46 | 47 | ## Setup and configuration 48 | 49 | The structure of this repository is as follows: 50 | 51 | ```txt 52 | . 53 | ├── core 54 | ├── dataset 55 | │   └── dataset.json 56 | ├── __init__.py 57 | ├── main.py 58 | ├── README.md 59 | └── requirements.txt 60 | ``` 61 | 62 | Before using this tool, the dataset must be downloaded and unzipped in the `dataset` folder as well as 63 | the calibration information (named `calib` on the coloradar dataset website) of the different sensors. 64 | 65 | Then, the folder structure would look like this: 66 | 67 | ```txt 68 | . 
69 | ├── core 70 | ├── dataset 71 | │   ├── 12_21_2020_ec_hallways_run0 72 | │   ├── 12_21_2020_ec_hallways_run4 73 | │   ├── 2_22_2021_longboard_run1 74 | │   ├── 2_23_2021_edgar_army_run5 75 | │   ├── 2_24_2021_aspen_run0 76 | │   ├── 2_24_2021_aspen_run9 77 | │   ├── 2_28_2021_outdoors_run0 78 | │   ├── calib 79 | │   └── dataset.json 80 | ├── __init__.py 81 | ├── main.py 82 | ├── README.md 83 | └── requirements.txt 84 | ``` 85 | 86 | In the snippet above, one can notice that multiple subsets of the dataset have been downloaded. The key 87 | point to configure the supported subset of the dataset and how to access them is the 88 | `dataset/dataset.json` file. 89 | 90 | The only part that requires your attention is the `folders` key of the `datastore` entry. 91 | 92 | ```json 93 | "folders": [ 94 | { 95 | "codename": "hallway0", 96 | "path": "12_21_2020_ec_hallways_run0" 97 | }, 98 | { 99 | "codename": "hallway4", 100 | "path": "12_21_2020_ec_hallways_run4" 101 | }, 102 | { 103 | "codename": "hallway1", 104 | "path": "12_21_2020_ec_hallways_run1" 105 | }, 106 | { 107 | "codename": "outdoor0", 108 | "path": "2_28_2021_outdoors_run0" 109 | }, 110 | { 111 | "codename": "aspen0", 112 | "path": "2_24_2021_aspen_run0" 113 | }, 114 | { 115 | "codename": "aspen9", 116 | "path": "2_24_2021_aspen_run9" 117 | }, 118 | { 119 | "codename": "edgar5", 120 | "path": "2_23_2021_edgar_army_run5" 121 | }, 122 | { 123 | "codename": "longboard1", 124 | "path": "2_22_2021_longboard_run1" 125 | } 126 | ] 127 | ``` 128 | In the example shown above, each subset of the dataset to handle, is registered along with a short 129 | `codename` to access it. The subsets already registered can be accessed with their corresponding 130 | codename. New subsets of the Coloradar dataset can be added similarly. The codenames can 131 | even be updated to suit the naming convention that you would prefer. 132 | 133 | With that, you're all set to play around with the Coloradar dataset. 134 | 135 | ## Usage 136 | 137 | **IMPORTANT NOTE**: 138 | - _If you've set up a virtual environment, don't forget to enable it first_ 139 | - _Renderings are done based on a web-based backend. So, a web browser tab will automatically be launched for all renderings._ 140 | 141 | The easiest way to have an overview of all the available options to interact with the dataset 142 | is the help command. 143 | 144 | 145 | ```bash 146 | python coloradar.py -h 147 | ``` 148 | 149 | However, find below the cheat sheet of this CLI tool 150 | 151 | 152 | 1. Overview 153 | 154 | ```bash 155 | # Print the 'dataset.json' configuration file 156 | python coloradar.py -o 157 | python coloradar.py --overview 158 | ``` 159 | 160 | Either one of these commands pretty-prints the entire `dataset/dataset.json` file to allow a quick 161 | overview of the current configuration in use. 162 | 163 | Since each subset of the dataset receives a codename to interact with it, you can request 164 | the list of currently registered subsets of the dataset and their codenames as follows: 165 | 166 | ```bash 167 | # Get the list of registered dataset and their codenames 168 | python coloradar.py -o --codename 169 | python coloradar.py --overview --codename 170 | ``` 171 | 172 | 2. 
Lidar sensor 173 | 174 | ```bash 175 | # Render lidar 3D pointcloud 176 | python coloradar.py --dataset -i --lidar 177 | 178 | # Render lidar pointcloud bird eye view 179 | python coloradar.py --dataset -i --lidar -bev 180 | python coloradar.py --dataset -i --lidar --bird-eye-view 181 | ``` 182 | 183 | See examples below: 184 | 185 | ```bash 186 | # Render lidar 3D pointcloud 187 | python coloradar.py --dataset hallway0 -i 130 --lidar 188 | 189 | # Render lidar pointcloud bird eye view 190 | python coloradar.py --dataset hallway0 -i 175 --lidar -bev 191 | ``` 192 | 193 | 194 | 3. Radar sensor 195 | 196 | The CLI options to access the data from either the single-chip radar sensor or cascaded 197 | radar are quite similar. 198 | 199 | The shorthand used is: 200 | - `scradar`: single-chip radar 201 | - `ccradar`: cascaded chip radar 202 | 203 | Therefore, we have the following commands 204 | 205 | 206 | ```bash 207 | # Render single chip radar 3D pointcloud 208 | python coloradar.py --dataset -i --scradar 209 | 210 | # Render single chip radar birds' eye view from 3D pointcloud 211 | # Resolution in meter/pixel | Eg.: 0.1 -> 10cm / pixel 212 | python coloradar.py --dataset -i --scradar -bev --resolution 213 | 214 | # Render single chip radar 3D heatmap 215 | python coloradar.py --dataset -i --scradar --heatmap 216 | 217 | 218 | # 219 | # Processing of raw ADC samples from single chip radar 220 | # 221 | 222 | # Render single chip radar 3D heatmap from raw ADC 223 | python coloradar.py --dataset -i --scradar --raw [--no-sidelobe] 224 | 225 | # Render single chip radar 2D heatmap from raw ADC samples 226 | python coloradar.py --dataset -i --scradar --raw --heatmap-2d 227 | 228 | # Render single chip radar 3D pointcloud from raw ADC samples 229 | python coloradar.py --dataset -i --scradar --raw -pcl 230 | 231 | # Render single chip radar pointcloud bird eye view from raw ADC samples 232 | python coloradar.py --dataset -i --scradar --raw -pcl -bev 233 | ``` 234 | 235 | ```bash 236 | # Render cascaded chip radar 3D pointcloud 237 | python coloradar.py --dataset -i --ccradar 238 | 239 | # Render cascaded chip radar birds' eye view from 3D pointcloud 240 | # Resolution in meter/pixel | Eg.: 0.1 -> 10cm / pixel 241 | python coloradar.py --dataset -i --ccradar -bev --resolution 242 | 243 | # Render cascaded chip radar 3D heatmap 244 | python coloradar.py --dataset -i --ccradar --heatmap 245 | 246 | 247 | # 248 | # Processing of raw ADC samples from cascaded radar sensor 249 | # 250 | 251 | # Render cascaded chip radar 3D heatmap from raw ADC samples 252 | python coloradar.py --dataset -i --ccradar --raw [--no-sidelobe] 253 | 254 | # Render cascaded chip radar 2D heatmap from raw ADC samples 255 | python coloradar.py --dataset -i --ccradar --raw --heatmap-2d 256 | 257 | # Render cascaded chip radar 3D pointcloud from raw ADC samples 258 | python coloradar.py --dataset -i --ccradar --raw -pcl 259 | 260 | # Render cascaded chip radar pointcloud bird eye view from raw ADC samples 261 | python coloradar.py --dataset -i --ccradar --raw -pcl -bev 262 | ``` 263 | 264 | See examples below: 265 | 266 | ```bash 267 | # Render single chip radar 3D pointcloud 268 | python coloradar.py --dataset hallway0 -i 130 --scradar --raw 269 | 270 | # Render cascaded chip radar pointcloud bird eye view from raw ADC samples 271 | python coloradar.py --dataset hallway0 -i 175 --ccradar --raw -pcl -bev 272 | ``` 273 | 274 | 4. 
Batched processing and saving outputs
275 | 
276 | Note that the index option `-i` is no longer needed here. The path given to
277 | the `--save-to` option does not need to exist yet; it will automatically be
278 | created in that case.
279 | 
280 | ```bash
281 | # Render and save all lidar bird eye views of a given subset of the dataset
282 | python coloradar.py --dataset <codename> --lidar -bev --save-to <output-folder>
283 | 
284 | # Render and save all single chip radar pointcloud bird eye views of a given subset of the dataset
285 | python coloradar.py --dataset <codename> --scradar --raw -pcl -bev --save-to <output-folder>
286 | 
287 | # Render and save all cascaded chip radar pointcloud bird eye views of a given subset of the dataset
288 | python coloradar.py --dataset <codename> --ccradar --raw -pcl -bev --save-to <output-folder>
289 | ```
290 | 
291 | 5. Save post-processed files as `.csv` or `.bin` files
292 | 
293 | The placeholder `<format>` can be either `csv` or `bin`. Binary files are saved as float32 values.
294 | 
295 | - `csv`: Comma Separated Value
296 | - `bin`: Binary
297 | 
298 | ```bash
299 | # Save all cascaded chip radar pointclouds of a given subset of the dataset as "csv" or "bin" files
300 | python coloradar.py --dataset <codename> --ccradar --raw -pcl --save-as <format> --save-to <output-folder>
301 | 
302 | # Example for saving post-processed pointclouds as bin files
303 | python coloradar.py --dataset outdoor0 --ccradar --raw -pcl --save-as bin --save-to output
304 | ```
305 | 
306 | The generated binary files can be read as follows:
307 | 
308 | ```python
309 | import numpy as np
310 | 
311 | # [0]: Azimuth
312 | # [1]: Range
313 | # [2]: Elevation
314 | # [3]: Velocity
315 | # [4]: Intensity of reflection in dB or SNR
316 | data = np.fromfile(filepath, dtype=np.float32, count=-1).reshape(-1, 5)
317 | ```
318 | 
319 | 6. Animation
320 | 
321 | ```bash
322 | # Create a video out of the images present in the input folder provided
323 | python coloradar.py --dataset <codename> --animate <input-folder>
324 | ```
325 | 
326 | The generated video is saved in the same folder as the images.
327 | 
--------------------------------------------------------------------------------
/core/calibration.py:
--------------------------------------------------------------------------------
1 | """Calibration module."""
2 | import numpy as np
3 | 
4 | import os
5 | import json
6 | from core.config import ROOTDIR
7 | from core.transform import (
8 |     BaseToCCRadar,
9 |     BaseToSCRadar,
10 |     BaseToLidar,
11 |     BaseToImu,
12 |     BaseToVicon,
13 | )
14 | 
15 | 
16 | class AntennaConfig:
17 |     """Antenna configuration.
18 | 
19 |     Attributes:
20 |         num_rx (int): Number of reception antennas
21 |         num_tx (int): Number of transmission antennas
22 |         f_design (float): Base frequency the sensor has been designed for
23 |         rxl (NDArray): Array describing the configuration of the
24 |             reception antennas in units of half-wavelengths
25 |         txl (NDArray): Array describing the configuration of the
26 |             transmission antennas in units of half-wavelengths
27 | 
28 |     NOTE:
29 |         The 'rxl' and 'txl' arrays describe the antenna layout using the
30 |         following structure: [idx, az, el].
31 | 
32 |         Thus, the first column indicates the index number of the antenna,
33 |         the second its azimuth position in half-wavelengths, and the last
34 |         column its elevation position.
35 | 
36 |         It's important to keep in mind that the row-wise ordering of those
37 |         matrices is [dev4, dev1, dev3, dev2] for the cascade radar.
38 |     """
39 | 
40 |     def __init__(self, filepath: str) -> None:
41 |         """Init Antenna config.
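# Sketch: building the TDM-MIMO virtual antenna array from the rx/tx layouts
# described in the AntennaConfig docstring above (and parsed by this class).
# It assumes the usual far-field model where each virtual element sits at the
# sum of its TX and RX positions; all coordinates are in units of half a
# wavelength. The calibration files must be available under `dataset/`.
from core.dataset import Coloradar

antenna = Coloradar().calibration.ccradar.antenna

# Columns of rxl/txl are [index, azimuth, elevation]
rx_pos = antenna.rxl[:, 1:3]                      # (num_rx, 2)
tx_pos = antenna.txl[:, 1:3]                      # (num_tx, 2)

# One virtual element per (tx, rx) pair -> (num_tx * num_rx, 2)
virtual_array = (tx_pos[:, None, :] + rx_pos[None, :, :]).reshape(-1, 2)
print(virtual_array.shape)    # (192, 2) for the 12 TX x 16 RX cascade radar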
42 | 43 | Argument: 44 | filepath: Path to the antenna configuration file 45 | """ 46 | rxl = [] # RX layout 47 | txl = [] # TX layout 48 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 49 | for line in fh: 50 | if line.startswith("# "): 51 | continue 52 | else: 53 | chunks = line.strip().split(" ") 54 | if chunks[0] == "rx": 55 | rxl.append([int(x) for x in chunks[1:]]) 56 | elif chunks[0] == "tx": 57 | txl.append([int(x) for x in chunks[1:]]) 58 | else: 59 | setattr(self, chunks[0].lower(), float(chunks[1])) 60 | self.num_rx = int(self.num_rx) 61 | self.num_tx = int(self.num_tx) 62 | self.rxl = np.array(rxl) 63 | self.txl = np.array(txl) 64 | 65 | 66 | class CouplingCalibration: 67 | """Coupling calibration. 68 | 69 | Attributes: 70 | num_rx (int): Number of reception antenna 71 | num_tx (int): Number of transmission antenna 72 | num_range_bins (int): Number of range bins 73 | num_doppler_bins (int): Number of doppler frequency bins 74 | data (list[float]): Array description coupling calibartion data 75 | 76 | TODO: Process rge raw calibration data 77 | """ 78 | 79 | def __init__(self, filepath: str) -> None: 80 | """Init coupling calibration.""" 81 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 82 | for line in fh: 83 | if line.startswith("# "): 84 | continue 85 | else: 86 | name, value = line.split(":") 87 | value = value.split(",") 88 | if len(value) > 1: 89 | setattr(self, name.lower(), [float(x) for x in value]) 90 | else: 91 | setattr(self, name.lower(), int(value[0])) 92 | 93 | 94 | class HeatmapConfiguration: 95 | """Heatmap configuration. 96 | 97 | Attributes: 98 | num_range_bins (int): Number of range bins 99 | num_elevation_bins (int): Number of elevation bins 100 | num_azimuth_bins (int): Number of azimuth bins 101 | range_bin_width (float): Width of range bin - range resolution 102 | azimuth_bins (list[float]): Array describing the azimuth bin 103 | elevation_bins (list[float]): Array describing the elevation bin 104 | """ 105 | 106 | def __init__(self, filepath: str) -> None: 107 | """Init heatmap configuration.""" 108 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 109 | for line in fh: 110 | if line.startswith("# "): 111 | continue 112 | else: 113 | value: list[str] = line.split(" ") 114 | if len(value) > 2: 115 | setattr(self, value[0].lower(), [float(x) for x in value[1:-1]]) 116 | else: 117 | setattr(self, value[0].lower(), float(value[1])) 118 | self.num_range_bins = int(self.num_range_bins) 119 | self.num_elevation_bins = int(self.num_elevation_bins) 120 | self.num_azimuth_bins = int(self.num_azimuth_bins) 121 | 122 | 123 | class WaveformConfiguration: 124 | """Waveform configuration. 
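# Sketch: reconstructing the polar grid that the pre-computed heatmaps are
# expressed on, from the HeatmapConfiguration parsed above. Treating the
# azimuth/elevation bins as radians is an assumption not guaranteed by the
# file format; the dataset must be available under `dataset/`.
import numpy as np

from core.dataset import Coloradar

hm = Coloradar().calibration.ccradar.heatmap

ranges = np.arange(hm.num_range_bins) * hm.range_bin_width    # meters
azimuths = np.array(hm.azimuth_bins)                          # assumed radians
elevations = np.array(hm.elevation_bins)                      # assumed radians

print(f"grid: {hm.num_elevation_bins} x {hm.num_azimuth_bins} x {hm.num_range_bins}"
      f" (elevation x azimuth x range), max range {ranges[-1]:.2f} m")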
125 | 126 | Attributes: 127 | num_rx (int): Number of reception antenna 128 | num_tx (int): Number of transmission antenna 129 | num_adc_samples_per_chirp (int): Number of ADC samples per chirp 130 | num_chirps_per_frame (int): Number of chirp per frame 131 | adc_sample_frequency (float): ADC sampling frequency in herz 132 | start_frequency (float): Chirp start frequency in herz 133 | idle_time (float): Idle time before starting a new chirp in second 134 | adc_start_time (float): Start time of ADC in second 135 | ramp_end_time (float): End time of frequency ramp 136 | frequency_slope (float): Frequency slope 137 | """ 138 | 139 | def __init__(self, filepath: str) -> None: 140 | """Init waveform configuration.""" 141 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 142 | for line in fh: 143 | if line.startswith("# "): 144 | continue 145 | else: 146 | name, value = line.split(" ") 147 | setattr(self, name.lower(), float(value)) 148 | self.num_rx = int(self.num_rx) 149 | self.num_tx = int(self.num_tx) 150 | self.num_adc_samples_per_chirp = int(self.num_adc_samples_per_chirp) 151 | self.num_chirps_per_frame = int(self.num_chirps_per_frame) 152 | 153 | 154 | class PhaseCalibration: 155 | """Phase/Frequency calibration. 156 | 157 | Attributes: 158 | num_rx (int): Number of reception antenna 159 | num_tx (int): Number of transmission antenna 160 | frequency_slope (float): Frequency slope 161 | sampling_rate (float): ADC sampling frequency in Herz 162 | frequency_calibration_matrix (list[float]): Sensor frequency 163 | calibration matrix 164 | """ 165 | 166 | def __init__(self, filepath: str) -> None: 167 | """Init Phase/Frequency configuration.""" 168 | with open(os.path.join(ROOTDIR, filepath), "r") as fh: 169 | config = json.load(fh) 170 | setattr(self, "num_rx", config["antennaCalib"]["numRx"]) 171 | setattr(self, "num_tx", config["antennaCalib"]["numTx"]) 172 | setattr(self, 173 | "frequency_slope", 174 | config["antennaCalib"]["frequencySlope"] 175 | ) 176 | setattr(self, 177 | "sampling_rate", 178 | config["antennaCalib"]["samplingRate"] 179 | ) 180 | setattr(self, 181 | "frequency_calibration_matrix", 182 | config["antennaCalib"]["frequencyCalibrationMatrix"] 183 | ) 184 | setattr(self, 185 | "phase_calibration_matrix", 186 | config["antennaCalib"]["phaseCalibrationMatrix"] 187 | ) 188 | 189 | 190 | class SCRadarCalibration: 191 | """Single Chip Radar Calibration. 192 | 193 | Holds the single chip radar sensor calibration parameters 194 | 195 | Attributes: 196 | antenna: Antenna configuration 197 | coupling: Antenna coupling calibration 198 | heatmap: Heatmap recording configuration 199 | waveform: Waveform generation parameters and calibration 200 | d: The optimal inter-antenna distance estimation in unit of 201 | a wavelength 202 | """ 203 | 204 | def __init__(self, config: dict[str, str]) -> None: 205 | """Init Single Chip radar calibration. 206 | 207 | Arguemnt: 208 | config: Dictionary containing the paths to files holding the 209 | radar calibration settiings. 
The main keys are: 210 | 211 | "antenna": Antenna calibration 212 | "couling": Coupling calibration 213 | "heatmap": heatmap configuration 214 | "waveform": Waveform configuration 215 | 216 | NOTE: See dataset.json 217 | """ 218 | self.antenna = AntennaConfig(config["antenna"]) 219 | self.coupling = CouplingCalibration(config["coupling"]) 220 | self.waveform = WaveformConfiguration(config["waveform"]) 221 | self.heatmap = HeatmapConfiguration(config["heatmap"]) 222 | 223 | # Chirp start frequency in GHz 224 | fstart: float = self.waveform.start_frequency / 1e9 225 | # Chirp slope in GHz/s 226 | fslope: float = self.waveform.frequency_slope / 1e9 227 | # ADC sampling frequency in Hz 228 | fsample: float = self.waveform.adc_sample_frequency 229 | # Chrip sampling time in s 230 | stime: float = self.waveform.num_adc_samples_per_chirp / fsample 231 | # Antenna PCB design base frequency 232 | fdesign: float = self.antenna.f_design 233 | self.d = 0.5 * ((fstart + (fslope * stime) / 2) / fdesign) 234 | 235 | def get_coupling_calibration(self) -> np.array: 236 | """Return the coupling calibration array to apply on the range fft.""" 237 | return np.array(self.coupling.data).reshape( 238 | self.coupling.num_tx, 239 | self.coupling.num_rx, 240 | 1, 241 | self.waveform.num_adc_samples_per_chirp, 242 | ) 243 | 244 | 245 | class CCRadarCalibration(SCRadarCalibration): 246 | """Cascade Chip Radar Calibration. 247 | 248 | Holds the cascade chip radar sensor calibration parameters 249 | 250 | Attributes: 251 | antenna: Antenna configuration 252 | coupling: Antenna coupling calibration 253 | heatmap: Heatmap recording configuration 254 | waveform: Waveform generation parameters and calibration 255 | phase: Phase and frequency calibration 256 | """ 257 | 258 | def __init__(self, config: dict[str, str]) -> None: 259 | """Init Cascade Chip Radar calibration. 
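# Sketch: deriving the usual FMCW performance figures from the waveform
# configuration parsed above, using textbook relations. Assumptions that are
# not guaranteed by the file format: frequency_slope is in Hz/s, times are in
# seconds, and num_chirps_per_frame counts the chirp loops of a single TX
# antenna in the TDM-MIMO scheme.
from core.dataset import Coloradar

C = 299_792_458.0                                  # speed of light in m/s

wf = Coloradar().calibration.ccradar.waveform

# Bandwidth actually swept while the ADC is sampling
bandwidth = wf.frequency_slope * wf.num_adc_samples_per_chirp / wf.adc_sample_frequency

range_resolution = C / (2 * bandwidth)
max_range = wf.adc_sample_frequency * C / (2 * wf.frequency_slope)

wavelength = C / (wf.start_frequency + bandwidth / 2)

# Chirp repetition interval seen by one TX antenna (TDM assumption)
chirp_period = wf.num_tx * (wf.idle_time + wf.ramp_end_time)
max_velocity = wavelength / (4 * chirp_period)
velocity_resolution = wavelength / (2 * wf.num_chirps_per_frame * chirp_period)

print(f"range res: {range_resolution:.3f} m, max range: {max_range:.1f} m")
print(f"velocity res: {velocity_resolution:.3f} m/s, max |v|: {max_velocity:.2f} m/s")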
260 | 
261 |         Argument:
262 |             config: In addition to the keys present in the super class,
263 |                 this config adds the following one:
264 | 
265 |                 "phase": Phase and frequency calibration
266 | 
267 |         NOTE: See dataset.json
268 |         """
269 |         super(CCRadarCalibration, self).__init__(config)
270 |         self.phase = PhaseCalibration(config["phase"])
271 | 
272 |     def get_phase_calibration(self) -> np.array:
273 |         """Return the phase calibration array."""
274 |         # Phase calibration matrix
275 |         pm = np.array(self.phase.phase_calibration_matrix)
276 |         pm = pm[::2] + 1j * pm[1::2]
277 |         pm = pm[0] / pm
278 |         return pm.reshape(
279 |             self.phase.num_tx,
280 |             self.phase.num_rx,
281 |             1,
282 |             1
283 |         )
284 | 
285 |     def get_frequency_calibration(self) -> np.array:
286 |         """Return the frequency calibration array."""
287 |         num_tx: int = self.phase.num_tx
288 |         num_rx: int = self.phase.num_rx
289 | 
290 |         # Calibration frequency slope
291 |         fcal_slope: float = self.phase.frequency_slope
292 |         # Calibration sampling rate
293 |         cal_srate: int = self.phase.sampling_rate
294 | 
295 |         fcal_matrix = np.array(self.phase.frequency_calibration_matrix)
296 | 
297 |         fslope: float = self.waveform.frequency_slope
298 |         srate: int = self.waveform.adc_sample_frequency
299 | 
300 |         # Delta P
301 |         #
302 |         dp = fcal_matrix - fcal_matrix[0]
303 | 
304 |         cal_matrix = 2 * np.pi * dp * (fslope / fcal_slope) * (cal_srate / srate)
305 |         cal_matrix /= self.waveform.num_adc_samples_per_chirp
306 |         cal_matrix = cal_matrix.reshape(num_tx, num_rx)
307 |         cal_matrix = np.expand_dims(cal_matrix, -1) * np.arange(
308 |             self.waveform.num_adc_samples_per_chirp
309 |         )
310 |         return np.exp(-1j * cal_matrix).reshape(
311 |             num_tx,
312 |             num_rx,
313 |             1,
314 |             self.waveform.num_adc_samples_per_chirp
315 |         )
316 | 
317 | 
318 | class BaseTransform:
319 |     """Base transforms.
320 | 
321 |     Transform to rotate and/or translate a point from the base coordinate
322 |     frame to a sensor-specific coordinate frame.
323 | 
324 |     Attributes:
325 |         to_ccradar: Transform from base to cascade chip radar sensor coordinate
326 |             frame
327 |         to_scradar: Transform from base to single chip radar sensor coordinate
328 |             frame
329 |         to_imu: Transform from base to IMU sensor coordinate frame
330 |         to_lidar: Transform from base to lidar sensor coordinate frame
331 |         to_vicon: Transform from base to Vicon sensor coordinate frame
332 |     """
333 | 
334 |     def __init__(self, config: dict[str, str]) -> None:
335 |         """Init base transforms.
336 | 
337 |         Argument:
338 |             config: Dictionary describing the paths to the transform files
339 | 
340 |         NOTE: See dataset.json
341 |         """
342 |         self.to_ccradar = BaseToCCRadar(config["base-to-ccradar"])
343 |         self.to_scradar = BaseToSCRadar(config["base-to-scradar"])
344 |         self.to_imu = BaseToImu(config["base-to-imu"])
345 |         self.to_lidar = BaseToLidar(config["base-to-lidar"])
346 |         self.to_vicon = BaseToVicon(config["base-to-vicon"])
347 | 
348 | 
349 | class Calibration:
350 |     """Calibration."""
351 | 
352 |     def __init__(self, rootdir: dict[str, dict[str, str]]) -> None:
353 |         """Init.
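# Sketch: how the calibration arrays returned above are typically applied to a
# raw ADC frame. The cube layout (num_tx, num_rx, num_chirps, num_samples) is
# an assumption inferred from the (tx, rx, 1, samples) shapes returned by the
# calibration getters; the actual loading and processing code lives in
# core/radar.py and may apply these corrections in a different order.
import numpy as np

from core.dataset import Coloradar

calib = Coloradar().calibration.ccradar
ntx = calib.waveform.num_tx
nrx = calib.waveform.num_rx
nchirps = calib.waveform.num_chirps_per_frame
nsamples = calib.waveform.num_adc_samples_per_chirp

# Placeholder frame; a real frame would be read from the ADC sample files
adc = np.zeros((ntx, nrx, nchirps, nsamples), dtype=np.complex64)

# Per-channel frequency and phase corrections broadcast over chirps/samples
adc = adc * calib.get_frequency_calibration() * calib.get_phase_calibration()

# Antenna coupling is usually removed in the range-FFT domain
range_fft = np.fft.fft(adc, axis=-1)
range_fft = range_fft - calib.get_coupling_calibration()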
354 | 355 | Argument: 356 | rootdir: Root directories to access sensors calibration config 357 | """ 358 | self.scradar = SCRadarCalibration(rootdir["scradar"]) 359 | self.ccradar = CCRadarCalibration(rootdir["ccradar"]) 360 | self.transform = BaseTransform(rootdir["transform"]) 361 | -------------------------------------------------------------------------------- /coloradar.py: -------------------------------------------------------------------------------- 1 | """Entrypoint of the package.""" 2 | import sys 3 | import argparse 4 | 5 | import matplotlib 6 | matplotlib.use("WebAgg") 7 | import matplotlib.pyplot as plt 8 | 9 | from core.dataset import Coloradar 10 | from core.utils.common import info, success 11 | 12 | 13 | def main () -> None: 14 | """Main entry point for the cli interface.""" 15 | parser = argparse.ArgumentParser( 16 | prog="coloradar.py", 17 | description="Facility for interacting with the Coloradar dataset." 18 | ) 19 | parser.add_argument( 20 | "-v", "--version", 21 | help="Print software version and information about the dataset.", 22 | action="store_true" 23 | ) 24 | parser.add_argument( 25 | "-o", "--overview", 26 | help="Print the dataset JSON description file", 27 | action="store_true" 28 | ) 29 | parser.add_argument( 30 | "--codename", 31 | help="Render the available sub-datasets and their codename", 32 | action="store_true" 33 | ) 34 | parser.add_argument( 35 | "-d", "--dataset", 36 | type=str, 37 | help="Codename of the sub-dataset record to load" 38 | ) 39 | parser.add_argument( 40 | "-i", "--index", 41 | type=int, 42 | help="Index of the dataset entry to read" 43 | ) 44 | parser.add_argument( 45 | "--lidar", 46 | help="Render the lidar pointcloud", 47 | action="store_true" 48 | ) 49 | parser.add_argument( 50 | "--scradar", 51 | help="Render the single chip radar pointcloud", 52 | action="store_true" 53 | ) 54 | parser.add_argument( 55 | "--ccradar", 56 | help="Render the cascade chip radar pointcloud", 57 | action="store_true" 58 | ) 59 | parser.add_argument( 60 | "--raw", 61 | help="Consider radar ADC measurement (only for scradar and ccradar)", 62 | action="store_true" 63 | ) 64 | parser.add_argument( 65 | "--heatmap", 66 | help="Render heatmap (only for scradar and ccradar)", 67 | action="store_true" 68 | ) 69 | parser.add_argument( 70 | "-pcl", "--pointcloud", 71 | help="Render 3D pointcloud (only for scradar and ccradar)", 72 | action="store_true" 73 | ) 74 | parser.add_argument( 75 | "--heatmap-2d", 76 | help="Render 2D heatmap (only for scradar and ccradar)", 77 | action="store_true" 78 | ) 79 | parser.add_argument( 80 | "--threshold", 81 | help="Threshold for filtering heatmap pointcloud", 82 | type=float, 83 | default=0.25, 84 | ) 85 | parser.add_argument( 86 | "--no-sidelobe", 87 | help="Skip the data within the first couple of meters to avoid sidelobes", 88 | action="store_true", 89 | default=False, 90 | ) 91 | parser.add_argument( 92 | "--velocity-view", 93 | help=( 94 | "Render the radar heatmap using velocity as the fourth dimension." 95 | " By default, this parameter is false and the gain in dB is used" 96 | "instead. However, it's only available for the processed raw ADC " 97 | "samples." 
98 | ), 99 | action="store_true", 100 | default=False, 101 | ) 102 | parser.add_argument( 103 | "--bird-eye-view", 104 | "-bev", 105 | help="Request a bird eye view rendering", 106 | action="store_true", 107 | default=False 108 | ) 109 | parser.add_argument( 110 | "--resolution", 111 | "-r", 112 | help="Bird eye view resolution", 113 | type=float, 114 | default=0.025, 115 | ) 116 | parser.add_argument( 117 | "--width", 118 | help="Bird eye view image width", 119 | type=float, 120 | default=30.0, 121 | ) 122 | parser.add_argument( 123 | "--height", 124 | help="Bird eye view image height", 125 | type=float, 126 | default=30.0, 127 | ) 128 | parser.add_argument( 129 | "--groundtruth", 130 | help="Render the bounding box wrapping the groundtruth objects " 131 | "a given dataset entry", 132 | action="store_true" 133 | ) 134 | 135 | # Parameters to control the rendering of the heatmap 136 | parser.add_argument( 137 | "--range", 138 | help="Range to focus the heatmap on", 139 | type=float, 140 | default=None 141 | ) 142 | parser.add_argument( 143 | "--min-range", 144 | help="Min Range to render in the heatmap", 145 | type=float, 146 | default=None 147 | ) 148 | parser.add_argument( 149 | "--max-range", 150 | help="Max Range to render in the heatmap", 151 | type=float, 152 | default=None 153 | ) 154 | parser.add_argument( 155 | "--azimuth", 156 | help="Azimuth to focus the heatmap on", 157 | type=float, 158 | default=None 159 | ) 160 | parser.add_argument( 161 | "--min-azimuth", 162 | help="Azimuth to focus the heatmap on", 163 | type=float, 164 | default=None 165 | ) 166 | parser.add_argument( 167 | "--max-azimuth", 168 | help="Azimuth to focus the heatmap on", 169 | type=float, 170 | default=None 171 | ) 172 | parser.add_argument( 173 | "--polar", 174 | help="Render heatmap in polar coordinate. " 175 | "Carterian coordinate is used by default otherwise", 176 | action="store_true", 177 | default=False 178 | ) 179 | parser.add_argument( 180 | "-s", "--save-as", 181 | help="Save post-processed ADC samples in files. 
Values: 'bin', 'csv'", 182 | type=str, 183 | default="", 184 | ) 185 | parser.add_argument( 186 | "--save-to", 187 | help="Destination folder to save the processed data to.", 188 | type=str, 189 | default=None, 190 | ) 191 | parser.add_argument( 192 | "--animate", 193 | help="Folder to read input images from", 194 | type=str, 195 | default=None, 196 | ) 197 | 198 | args = parser.parse_args() 199 | 200 | coloradar = Coloradar() 201 | 202 | if args.overview: 203 | if args.codename: 204 | coloradar.printCodenames() 205 | sys.exit(0) 206 | print(coloradar) 207 | sys.exit(0) 208 | 209 | if args.dataset and args.index: 210 | # Get an instance of the Record class 211 | record = coloradar.getRecord(args.dataset, args.index) 212 | 213 | if args.lidar: 214 | record.load("lidar") 215 | if args.bird_eye_view: 216 | info("Rendering lidar pointcloud bird eye view ...") 217 | bev = record.lidar.getBirdEyeView( 218 | args.resolution, 219 | (-args.width/2, args.width/2), 220 | (-args.height/2, args.height/2), 221 | ) 222 | success("Bird Eye View successfully rendred!") 223 | plt.imshow(bev) 224 | plt.show() 225 | info("Bird Eye View closed!") 226 | sys.exit(0) 227 | info("Rendering lidar pointcloud ...") 228 | record.lidar.show() 229 | success("Successfully closed!") 230 | sys.exit(0) 231 | elif args.scradar: 232 | record.load("scradar") 233 | if args.raw: 234 | info("Processing raw ADC samples ...") 235 | if args.pointcloud: 236 | info("Rendering Radar pointcloud ...") 237 | record.scradar.showPointcloudFromRaw( 238 | args.velocity_view, 239 | args.bird_eye_view, 240 | args.polar 241 | ) 242 | success("Successfully closed!") 243 | sys.exit(0) 244 | elif args.heatmap_2d: 245 | info("Rendering 2D heatmap ...") 246 | record.scradar.show2dHeatmap() 247 | sys.exit(0) 248 | info("Rendering processed raw radar ADC samples ...") 249 | record.scradar.showHeatmapFromRaw( 250 | args.threshold, 251 | args.no_sidelobe, 252 | args.velocity_view, 253 | args.polar, 254 | (args.min_range, args.max_range), 255 | (args.min_azimuth, args.max_azimuth), 256 | ) 257 | success("Successfully closed!") 258 | sys.exit(0) 259 | elif args.bird_eye_view: 260 | info("Rendering single chip radar pointcloud bird eye view ...") 261 | bev = record.scradar.getBirdEyeView( 262 | args.resolution, 263 | (-args.width/2, args.width/2), 264 | (-args.height/2, args.height/2), 265 | ) 266 | success("Bird Eye View successfully rendred!") 267 | plt.imshow(bev) 268 | plt.show() 269 | info("Bird Eye View closed!") 270 | sys.exit(0) 271 | elif args.heatmap: 272 | info("Rendering single chip radar heatmap ...") 273 | record.scradar.showHeatmap(args.threshold, args.no_sidelobe) 274 | success("Heatmap closed!") 275 | sys.exit(0) 276 | info("Rendering single chip radar pointcloud ...") 277 | record.scradar.show() 278 | success("Successfully closed!") 279 | sys.exit(0) 280 | elif args.ccradar: 281 | record.load("ccradar") 282 | if args.raw: 283 | info("Processing raw ADC samples.") 284 | if args.pointcloud: 285 | info("Rendering Radar pointcloud ...") 286 | record.ccradar.showPointcloudFromRaw( 287 | args.velocity_view, 288 | args.bird_eye_view, 289 | args.polar 290 | ) 291 | success("Successfully closed!") 292 | sys.exit(0) 293 | elif args.heatmap_2d: 294 | info("Rendering 2D heatmap ...") 295 | record.ccradar.show2dHeatmap(args.polar) 296 | sys.exit(0) 297 | info("Rendering 4D heatmap ...") 298 | record.ccradar.showHeatmapFromRaw( 299 | args.threshold, 300 | args.no_sidelobe, 301 | args.velocity_view, 302 | args.polar, 303 | (args.min_range, 
args.max_range), 304 | (args.min_azimuth, args.max_azimuth), 305 | ) 306 | success("Successfully closed!") 307 | sys.exit(0) 308 | elif args.bird_eye_view: 309 | info("Rendering cascaded chip radar pointcloud bird eye view ...") 310 | bev = record.ccradar.getBirdEyeView( 311 | args.resolution, 312 | (-args.width/2, args.width/2), 313 | (-args.height/2, args.height/2), 314 | ) 315 | success("Bird Eye View successfully rendred!") 316 | plt.imshow(bev) 317 | plt.show() 318 | info("Bird Eye View closed!") 319 | sys.exit(0) 320 | elif args.heatmap: 321 | info("Rendering cascade chip radar heatmap ...") 322 | record.ccradar.showHeatmap(args.threshold, args.no_sidelobe) 323 | success("Heatmap closed!") 324 | sys.exit(0) 325 | info("Rendering cascaded chip radar pointcloud ...") 326 | record.ccradar.show() 327 | success("Successfully closed!") 328 | sys.exit(0) 329 | elif args.dataset and args.save_to: 330 | record = coloradar.getRecord(args.dataset, 0) 331 | if args.lidar: 332 | info(f"Generated batches of lidar heatmap from '{args.dataset}'") 333 | record.process_and_save( 334 | "lidar", 335 | resolution=args.resolution, 336 | srange=(-args.width/2, args.width/2), 337 | frange=(-args.height/2, args.height/2), 338 | output=args.save_to, 339 | ) 340 | success("BEV generated with success!") 341 | sys.exit(0) 342 | elif (args.ccradar or args.scradar) and args.raw: 343 | sensor: str = "scradar" 344 | if args.ccradar: 345 | sensor = "ccradar" 346 | if args.heatmap_2d: 347 | info(f"Generating batches of radar 2D-heatmap from '{args.dataset}'") 348 | record.process_and_save( 349 | sensor, 350 | heatmap_3d=False, 351 | output=args.save_to, 352 | ) 353 | success("Radar 2D heatmap generated with success!") 354 | sys.exit(0) 355 | if args.pointcloud: 356 | info(f"Generating batches of radar pointcloud from '{args.dataset}'") 357 | record.process_and_save( 358 | sensor, 359 | output=args.save_to, 360 | velocity_view=args.velocity_view, 361 | bird_eye_view=args.bird_eye_view, 362 | polar=args.polar, 363 | pointcloud=True, 364 | save_as=args.save_as, 365 | ) 366 | success("Radar pointcloud generated with success!") 367 | sys.exit(0) 368 | info(f"Generating batches of radar heatmap from '{args.dataset}'") 369 | record.process_and_save( 370 | sensor, 371 | output=args.save_to, 372 | threshold=args.threshold, 373 | no_sidelobe=args.no_sidelobe, 374 | velocity_view=args.velocity_view, 375 | heatmap_3d=True, 376 | ) 377 | success("Radar 3D heatmap generated with success!") 378 | sys.exit(0) 379 | 380 | elif args.dataset and args.animate: 381 | record = coloradar.getRecord(args.dataset, 0) 382 | record.make_video(args.animate) 383 | success("Animation generated with success!") 384 | sys.exit(0) 385 | parser.print_help() 386 | sys.exit(0) 387 | 388 | 389 | if __name__ == "__main__": 390 | main() 391 | -------------------------------------------------------------------------------- /core/utils/radardsp.py: -------------------------------------------------------------------------------- 1 | """Radar Digital Signal Processing. 2 | 3 | This radar signal processing module provides the necessary tools to 4 | process the raw IQ measurements of a MIMO radar sensor into exploitable 5 | pointcloud and heatmap. 6 | 7 | NOTE: Make sure that a calibration stage is applied to the raw ADC data 8 | before further processing. 
9 | """ 10 | import numpy as np 11 | 12 | 13 | # Speed of light 14 | C: float = 299792458.0 15 | 16 | 17 | def steering_matrix(txl: np.array, rxl: np.array, 18 | az: np.array, el: np.array,) -> np.array: 19 | """Build a steering matrix. 20 | 21 | Arguments: 22 | txl: TX Antenna layout 23 | rxl: RX Antenna layout 24 | az: Azimuth angle 25 | el: Elevation angle 26 | """ 27 | taz = txl[:, 1] 28 | tel = txl[:, 2] 29 | raz = rxl[:, 1] 30 | rel = rxl[:, 2] 31 | 32 | laz = np.kron(taz, np.ones(len(raz))).reshape(-1, len(raz)) + raz 33 | laz = laz.reshape(-1, 1) 34 | lel = np.kron(tel, np.ones(len(rel))).reshape(-1, len(rel)) + rel 35 | lel = lel.reshape(-1, 1) 36 | # Virtual antenna array steering matrix 37 | smat = np.exp( 38 | 1j * np.pi * (laz * (np.cos(az) * np.sin(el)) + lel * np.cos(el)) 39 | ) 40 | return smat 41 | 42 | 43 | def music(signal: np.array, txl: np.array, rxl: np.array, 44 | az_bins: np.array, el_bins: np.array) -> np.array: 45 | """MUSIC Direction of Arrival estimation algorithm. 46 | 47 | Arguments: 48 | signal: Signal received by all the antenna element 49 | Is expected to be the combined received signal on each antenna 50 | element. 51 | txl: TX Antenna layout 52 | rxl: RX Antenna layout 53 | az_bins: Azimuth bins 54 | el_bins: Elevation bins 55 | 56 | NOTE: Under test 57 | """ 58 | # Number of targets expected 59 | T: int = 10 60 | 61 | # Number of antenna 62 | S: int = 12 63 | 64 | N = len(signal) 65 | signal = np.asmatrix(signal) 66 | # Covariance of the received signal 67 | R = (1.0 / N) * signal.H * signal 68 | 69 | eigval, eigvect = np.linalg.eig(R) 70 | idx = eigval.argsort()[::-1] 71 | eigval = eigval[idx] 72 | eigvect = eigvect[:, idx] 73 | 74 | V = eigvect[:, :T] 75 | Noise = eigvect[:, T:] 76 | 77 | A = steering_matrix(txl, rxl, az_bins, el_bins) 78 | A = np.asmatrix(A) 79 | return (1.0 / np.abs(A.H * (Noise * Noise.H) * A)) 80 | 81 | 82 | def esprit(signal: np.array, order: int, nb_sources: int) -> np.array: 83 | """ESPRIT Frequency estiamtion algorithm. 84 | 85 | Arguments: 86 | signal: Samples of the signal 87 | order: Order of the signal 88 | nb_sources: Number of sources (or targets) 89 | Return: 90 | Normalized angular frequencies 91 | """ 92 | N = len(signal) 93 | signal = np.asmatrix(signal) 94 | # Covariance of the received signal 95 | R = (1.0 / N) * signal.H * signal 96 | 97 | eigval, eigvect = np.linalg.eig(R) 98 | idx = eigval.argsort()[::-1] 99 | eigvect = eigvect[:, idx] 100 | 101 | s = eigvect[:, 0:nb_sources] 102 | s1 = s[0:order-1, :] 103 | s2 = s[1:order:, :] 104 | p = np.linalg.pinv(s1) @ s2 105 | eigs, _ = np.linalg.eig(p) 106 | return eigs 107 | 108 | 109 | def virtual_array(adc_samples: np.array, 110 | txl: list[list[int]], 111 | rxl: list[list[int]]) -> np.array: 112 | """Generate the virtual antenna array matching the layout provided. 
113 | 114 | Arguments: 115 | adc_samples: Raw ADC samples with the shape (ntx, nrx, nc, ns) 116 | ntx: Number of TX antenna 117 | nrx: Number of RX antenna 118 | nc: Number of chirps per frame 119 | ns: Number of samples per chirp 120 | txl: TX antenna layout array 121 | - Structure per row: [tx_idx, azimuth, elevation] 122 | - Unit: Half a wavelength 123 | rxl: RX antenna layout array 124 | - Structure: [tx_idx, azimuth, elevation] 125 | - Unit: Half a wavelength 126 | 127 | Return: 128 | The virtual antenna array of shape (nel, naz, nc, ns) 129 | nel: Number of elevation layers 130 | naz: Number of azimuth positions 131 | nc, ns: See above (description of `adc_samples`) 132 | 133 | See the variable `va_shape` for how the shape is estimated 134 | """ 135 | _, _, nc, ns = adc_samples.shape 136 | 137 | # Shape of the virtual antenna array 138 | va_shape: tuple = ( 139 | # Length of the elevation axis 140 | # the "+1" is to account for the 0-indexing used 141 | np.max(txl[:, 2]) + np.max(rxl[:, 2]) + 1, 142 | 143 | # Length of the azimuth axis 144 | # the "+1" is to account for the 0-indexing used 145 | np.max(txl[:, 1]) + np.max(rxl[:, 1]) + 1, 146 | 147 | # Number of chirps per frame 148 | nc, 149 | 150 | # Number of samples per chirp 151 | ns, 152 | ) 153 | 154 | # Virtual antenna array 155 | va = np.zeros(va_shape, dtype=np.complex128) 156 | 157 | # *idx: index of the antenna element 158 | # *az: azimuth of the antenna element 159 | # *el: elevation of the antenna element 160 | for tidx, taz, tel in txl: 161 | for ridx, raz, rel in rxl: 162 | # When a given azimuth and elevation position is already 163 | # populated, the new value is added to the previous to have 164 | # a strong signal feedback 165 | va[tel+rel, taz+raz, :, :] += adc_samples[tidx, ridx, :, :] 166 | return va 167 | 168 | 169 | def fft_size(size: int) -> int: 170 | """Compute the closest power of 2 to use for FFT computation. 171 | 172 | Argument: 173 | size: current size of the samples. 174 | 175 | Return: 176 | Adequate window size for FFT. 177 | """ 178 | return 2 ** int(np.ceil(np.log(size) / np.log(2))) 179 | 180 | 181 | def get_max_range(fs: float, fslope: float) -> float: 182 | """Compute the maximum range of the radar. 183 | 184 | Arguments: 185 | fs: Sampling frequency 186 | fslope: Chirp slope frequency 187 | """ 188 | return fs * C / (2 * fslope) 189 | 190 | 191 | def get_max_velocity(ntx: int, fstart: float, tc: float) -> float: 192 | """Compute the maximum velocity of the radar. 193 | 194 | Arguments: 195 | ntx: Number of TX antenna 196 | fstart: Chirp start frequency 197 | tc: Chirp time 198 | """ 199 | return (C / fstart) / (4.0 * tc * ntx) 200 | 201 | 202 | def get_range_resolution(ns: int, fs: float, fslope, 203 | is_adc_filtered: bool = True) -> float: 204 | """Compute the range resolution of a Radar sensor. 205 | 206 | Arguments: 207 | ns: Number of ADC samples per chirp 208 | fs: Sampling frequency 209 | fslope: Chirp frequency slope 210 | is_adc_filtered: Boolean flag to indicate if a window function 211 | has been applied to the ADC data before processing. In such case 212 | the range resolution is affected. This parameter is set to True 213 | by default. 
214 | 215 | Return: 216 | Range resolution in meters 217 | """ 218 | rres: float = C / (ns * fslope / fs) 219 | if not is_adc_filtered: 220 | rres = rres / 2 221 | return rres 222 | 223 | 224 | def get_velocity_resolution(nc: int, fstart: float, tc: float, 225 | is_adc_filtered: bool = True) -> float: 226 | """Compute the velocity resolution of a Radar sensor. 227 | 228 | Arguments: 229 | nc: Number of chirps per frame 230 | fstart: Start frequency of the chirp 231 | tc: Chirp time 232 | tc = Idle time + End time 233 | is_adc_filtered: Boolean flag to indicate if a window function 234 | has been applied to the ADC data before processing. In such case 235 | the velocity resolution is affected. This parameter is set to True 236 | by default. 237 | 238 | Return: 239 | Velocity resolution in meters per second 240 | """ 241 | lbd: float = C / fstart # lambda 242 | vres = lbd / (tc * nc) 243 | if not is_adc_filtered: 244 | vres = vres / 2 245 | return vres 246 | 247 | 248 | 249 | def get_range_bins(ns: int, fs: float, fslope) -> np.array: 250 | """Return the range bins. 251 | 252 | Arguments: 253 | ns: Number of ADC samples per chirp 254 | fs: Sampling frequency 255 | fslope: Chirp frequency slope 256 | 257 | Return: 258 | Array of range bins 259 | """ 260 | rmax: float = get_max_range(fs, fslope) 261 | # Resolution used for rendering 262 | # Note: Not the actual sensor resolution 263 | rres = rmax / ns 264 | return np.arange(0, rmax, rres) 265 | 266 | 267 | def get_velocity_bins(ntx: int, nv: int, fstart: float, tc: float) -> np.array: 268 | """Compute the velocity bins. 269 | 270 | Arguments: 271 | ntx: Number of transmission antenna 272 | nv: Number of expected velocity bins 273 | fstart: Start frequency of the chirp 274 | tc: Chirp time 275 | tc = Idle time + End time 276 | 277 | Return: 278 | Array of velocity bins 279 | """ 280 | vmax: float = get_max_velocity(ntx, fstart, tc) 281 | # Resolution used for rendering 282 | # Not the actual radar resolution 283 | vres = (2 * vmax) / nv 284 | 285 | bins = np.arange(-vmax, vmax, vres) 286 | return bins 287 | 288 | 289 | def get_elevation_bins() -> np.array: 290 | """.""" 291 | pass 292 | 293 | 294 | def get_azimuth_bins() -> np.array: 295 | """.""" 296 | pass 297 | 298 | 299 | def os_cfar(samples: np.array, ws: int, ngc: int = 2, tos: int = 8) -> np.array: 300 | """Ordered Statistic Constant False Alarm Rate detector. 301 | 302 | Arguments: 303 | samples: Non-coherently integrated samples 304 | ws: Window Size 305 | ngc: Number of guard cells 306 | tos: Scaling factor 307 | 308 | Return: 309 | mask 310 | """ 311 | ns: int = len(samples) 312 | k: int = int(3.0 * ws/4.0) 313 | 314 | # Add leading and trailing zeros in order to run the algorithm over 315 | # the entire set of samples 316 | samples = np.append(np.zeros(ws), samples) 317 | samples = np.append(samples, np.zeros(ws)) 318 | 319 | mask = np.zeros(ns) 320 | for idx in range(ns): 321 | # tcells: training cells 322 | pre_tcells = samples[ws + idx - ngc - (ws // 2) : ws + idx - ngc] 323 | post_tcells = samples[ws + idx + ngc + 1: ws + idx + ngc + (ws // 2) + 1] 324 | tcells = np.array([]) 325 | tcells = np.append(tcells, pre_tcells) 326 | tcells = np.append(tcells, post_tcells) 327 | tcells = np.sort(tcells) 328 | if samples[ws + idx] > tcells[k] * tos: 329 | mask[idx] = 1 330 | return mask 331 | 332 | 333 | class ObjectDetected: 334 | """Object detected. 
335 | 336 | Definition of an object detected by applying CFAR 337 | 338 | NOTE: It's possible to have multiple detections on the same object 339 | depending on the resolution of the radar sensor 340 | """ 341 | 342 | vidx: int = -1 # Velocity bin index 343 | ridx: int = -1 # Range bin index 344 | aidx: int = -1 # Azimuth bin index 345 | eidx: int = -1 # Elevation bin index 346 | snr: float = 0 # Signal-to-noise ratio 347 | 348 | def __str__(self) -> str: 349 | return f"Obj(SNR:{self.snr:.2f})" 350 | 351 | def __repr__(self) -> str: 352 | return self.__str__() 353 | 354 | 355 | def nq_cfar_2d(samples, ws: int, ngc: int, 356 | quantile: float = 0.75, tos: int = 8) -> tuple[np.array, list]: 357 | """N'th quantile statistic Constant False Alarm Rate detector. 358 | 359 | The principle is exactly the same as the Ordered Statistic 360 | Constant False Alarm Rate detector. This routine just applies 361 | it on a 2D signal. 362 | 363 | Arguments: 364 | samples: 2D signal to filter 365 | ws (int): Window size 366 | ngc (int): Number of guard cells 367 | quantile (float): Order of the quantile to compute for the noise 368 | power estimation 369 | tos (int): Scaling factor for detecting an object 370 | """ 371 | nx, ny = samples.shape 372 | mask = np.zeros((nx, ny)) 373 | detections: list[ObjectDetected] = [] 374 | 375 | for xidx in range(nx): 376 | # Before CUT (Cell Under Test) start index on the x-axis 377 | xbs: int = xidx - ws 378 | xbs = xbs if (xbs > 0) else 0 379 | 380 | # Before CUT (Cell Under Test) end index on the x-axis 381 | xbe: int = xidx - ngc 382 | xbe = xbe if (xbe > 0) else 0 383 | 384 | # After CUT (Cell Under Test) start index on the x-axis 385 | xas: int = xidx + ngc + 1 386 | # After CUT (Cell Under Test) end index on the x-axis 387 | xae: int = xidx + ws + 1 388 | xae = xae if (xae < nx) else nx 389 | 390 | for yidx in range(ny): 391 | # Before CUT (Cell Under Test) start index on the y-axis 392 | ybs: int = yidx - ws 393 | ybs = ybs if (ybs > 0) else 0 394 | 395 | # Before CUT (Cell Under Test) end index on the y-axis 396 | ybe: int = yidx - ngc 397 | 398 | # After CUT (Cell Under Test) start index on the y-axis 399 | yas: int = yidx + ngc + 1 400 | 401 | # After CUT (Cell Under Test) end index on the y-axis 402 | yae: int = yidx + ws + 1 403 | yae = yae if (yae < ny) else ny 404 | 405 | tcells = np.array([]) 406 | if xbe > 0: 407 | tcells = samples[xbs:xbe, ybs:yae].reshape(-1) 408 | 409 | if xas < nx - 1: 410 | tcells = np.append( 411 | tcells, 412 | samples[xas:xae, ybs:yae].reshape(-1) 413 | ) 414 | 415 | if ybe > 0: 416 | tcells = np.append( 417 | tcells, 418 | samples[xbe:xas, ybs:ybe,].reshape(-1) 419 | ) 420 | 421 | if yas < ny - 1: 422 | tcells = np.append( 423 | tcells, 424 | samples[xbe:xas, yas:yae,].reshape(-1) 425 | ) 426 | m = np.quantile(tcells, quantile, method="weibull") 427 | if samples[xidx, yidx] > (m * tos): 428 | mask[xidx, yidx] = 1 429 | obj = ObjectDetected() 430 | obj.vidx = xidx 431 | obj.ridx = yidx 432 | obj.snr = samples[xidx, yidx] / m 433 | detections.append(obj) 434 | return mask, detections 435 | 436 | 437 | def velocity_compensation(ntx, nc) -> np.array: 438 | """Generate the compensation matrix for velocity-induced phase-shift. 439 | 440 | Meant to compensate the velocity-induced phase shift created by TDM MIMO 441 | configuration. 
442 | 443 | Arguments: 444 | ntx: Number of transmission antenna 445 | nc: Number of chirps per frame 446 | 447 | Return: 448 | Phase shift correction matrix 449 | """ 450 | tl = np.arange(0, ntx) 451 | cl = np.arange(-nc//2, nc//2) 452 | tcl = np.kron(tl, cl) / (ntx * nc) 453 | # Velocity compensation 454 | vcomp = np.exp(-2j * np.pi * tcl) 455 | vcomp = vcomp.reshape(ntx, 1, nc, 1) 456 | return vcomp 457 | -------------------------------------------------------------------------------- /core/radar.py: -------------------------------------------------------------------------------- 1 | """Radar. 2 | 3 | SCRadar: Single Chip Radar Sensor 4 | CCRadar: Cascade Chip Radar Sensor 5 | """ 6 | from typing import Optional 7 | import os 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | 11 | from core.calibration import Calibration, SCRadarCalibration 12 | from core.utils.common import error, info 13 | from core.utils import radardsp as rdsp 14 | from .lidar import Lidar 15 | 16 | from core.config import NUMBER_RANGE_BINS_MIN 17 | from core.config import NUMBER_DOPPLER_BINS_MIN 18 | from core.config import NUMBER_AZIMUTH_BINS_MIN 19 | from core.config import NUMBER_ELEVATION_BINS_MIN 20 | from core.config import DOA_METHOD 21 | from core.config import RDSP_METHOD 22 | 23 | from core.config import RD_OS_CFAR_WS 24 | from core.config import RD_OS_CFAR_GS 25 | from core.config import RD_OS_CFAR_K 26 | from core.config import RD_OS_CFAR_TOS 27 | 28 | 29 | class SCRadar(Lidar): 30 | """Radar. 31 | 32 | Attrinutes: 33 | NUMBER_RECORDING_ATTRIBUTES: Number of 32-bit integer packed 34 | to form a single measurement recording 35 | AZ_OS_CFAR_WS: Constant False Alarm Rate Window Size 36 | AZ_OS_CFAR_GS: Constant False Alarm Rate Guard Cell 37 | AZ_OS_CFAR_TOS: Constant False Alarm Tos factor 38 | """ 39 | 40 | # The recorded attributes are: 41 | # x, y, z, I (Intensity of the reflections), Vr (Radial velocity) 42 | NUMBER_RECORDING_ATTRIBUTES: int = 5 43 | 44 | # 1D OS-CFAR Parameters used for peak selection in Azimuth-FFT 45 | AZ_OS_CFAR_WS: int = 8 # Window size 46 | AZ_OS_CFAR_GS: int = 4 # Guard cell 47 | AZ_OS_CFAR_TOS: int = 8 # Tos factor 48 | 49 | AZIMUTH_FOV: float = np.deg2rad(180) 50 | ELEVATION_FOV: float = np.deg2rad(20) 51 | 52 | def __init__(self, config: dict[str, str], 53 | calib: Calibration, index: int) -> None: 54 | """Init. 
55 | 56 | Arguments: 57 | config (dict): Paths to access the dataset 58 | calib (Calibration): Calibration object (See calibration.py) 59 | index (int): Index of the lidar record to load 60 | """ 61 | self.sensor: str = self.__class__.__name__.lower() 62 | self.calibration: SCRadarCalibration = getattr(calib, self.sensor) 63 | self.config = config 64 | self.index = index 65 | 66 | # Read pointcloud 67 | self.cld = self._load( 68 | index, "pointcloud", np.float32, 69 | (-1, self.NUMBER_RECORDING_ATTRIBUTES) 70 | ) 71 | 72 | # Read heatmap 73 | self.heatmap = self._load( 74 | index, "heatmap", np.float32, 75 | ( 76 | self.calibration.heatmap.num_elevation_bins, 77 | self.calibration.heatmap.num_azimuth_bins, 78 | self.calibration.heatmap.num_range_bins, 79 | 2 # Number of value per bin (intensity and location) 80 | ) 81 | ) 82 | 83 | # read raw ADC measurements 84 | self.raw = None 85 | raw = self._load( 86 | index, "raw", np.int16, 87 | ( 88 | self.calibration.waveform.num_tx, 89 | self.calibration.waveform.num_rx, 90 | self.calibration.waveform.num_chirps_per_frame, 91 | self.calibration.waveform.num_adc_samples_per_chirp, 92 | 2 # I and Q signal measurements 93 | ) 94 | ) 95 | if raw is not None: 96 | # s = I + jQ 97 | I = np.float16(raw[:, :, :, :, 0]) 98 | Q = np.float16(raw[:, :, :, :, 1]) 99 | self.raw = I + 1j * Q 100 | 101 | def _load(self, index: int, ftype: str, dtype: np.dtype, 102 | shape: tuple[int, ...]) -> Optional[np.array]: 103 | """Load data. 104 | 105 | Arguments: 106 | index (int): Index of the datafile to load 107 | ftype (str): File type to load. It could be one of the following 108 | values: ('pointcloud', 'heatmap', 'raw') 109 | dtype (np.dtype): Data type in the file 110 | shape (tuple): The expected shape of the ouput array. The data 111 | will be reshaped based on this parameter 112 | 113 | Return: Data loaded. None if an error occured during the loading 114 | process. 115 | """ 116 | filename = self._filename( 117 | self.config["paths"][self.sensor][ftype]["filename_prefix"], 118 | index, 119 | "bin" 120 | ) 121 | filepath = os.path.join( 122 | self.config["paths"]["rootdir"], 123 | self.config["paths"][self.sensor][ftype]["data"], 124 | filename 125 | ) 126 | try: 127 | data = np.fromfile(filepath, dtype) 128 | data = np.reshape(data, shape) 129 | except FileNotFoundError: 130 | # error(f"File '{filepath}' not found.") 131 | data = None 132 | return data 133 | 134 | def showHeatmap(self, threshold: float = 0.15, no_sidelobe: bool = False, 135 | render: bool = True) -> None: 136 | """Render heatmap. 137 | 138 | Argument: 139 | threshold: Value used to filter the pointcloud 140 | no_sidelobe: Flag used to skip the close range data from rendering 141 | in order to avoid side lobes 142 | render: Flag triggering the rendering of the heatmap when 'true'. 143 | """ 144 | if self.heatmap is None: 145 | info("No heatmap available!") 146 | return None 147 | ax = plt.axes(projection="3d") 148 | ax.set_title("Heatmap") 149 | ax.set_xlabel("Azimuth") 150 | ax.set_ylabel("Range") 151 | ax.set_zlabel("Elevation") 152 | pcl = self._heatmap_to_pointcloud(threshold, no_sidelobe) 153 | plot = ax.scatter( 154 | pcl[:, 0], 155 | pcl[:, 1], 156 | pcl[:, 2], 157 | c=pcl[:, 3], 158 | cmap=plt.cm.get_cmap(), 159 | ) 160 | colorbar = plt.colorbar(plot) 161 | colorbar.set_label("Reflection Intensity") 162 | if render: 163 | plt.show() 164 | 165 | def showHeatmapBirdEyeView(self, threshold: float) -> None: 166 | """Render the bird eye view of the heatmp pointcloud. 
167 | 168 | Argument: 169 | threshold (float): Threshold to filter the pointcloud 170 | 171 | TODO: To be fixed. Not working 172 | """ 173 | pointcloud = self._heatmap_to_pointcloud(threshold) 174 | x = pointcloud[:, 0] 175 | y = pointcloud[:, 1] 176 | z = pointcloud[:, 2] 177 | 178 | _range_bin_width: float = self.calibration.heatmap.range_bin_width 179 | _num_r_num: int = self.calibration.heatmap.num_range_bins 180 | max_range = np.ceil(_range_bin_width * _num_r_num) 181 | 182 | resolution: float = _range_bin_width/10 183 | srange: tuple[float, float] = (-max_range, max_range) 184 | frange: tuple[float, float] = (0, max_range) 185 | 186 | ximg = (-y / resolution).astype(np.int32) 187 | yimg = (-x / resolution).astype(np.int32) 188 | 189 | ximg -= int(np.floor(srange[0] / resolution)) 190 | yimg += int(np.floor(frange[1] / resolution)) 191 | 192 | # Prepare the three channels of the bird eye view image 193 | pixels = np.zeros((len(z), 3), dtype=np.uint8) 194 | 195 | # Encode distance 196 | norm = np.sqrt(x **2 + y **2 + z**2) 197 | pixels[:, 0] = (255.0 / (1.0 + np.exp(-norm))).astype(np.uint8) 198 | 199 | # Encode radial velosity of the radar 200 | pixels[:, 1] = ( 201 | 255.0 / (1.0 + np.exp(-np.abs(pointcloud[:, 4]))) 202 | ).astype(np.uint8) 203 | 204 | # Encode reflection intensity information 205 | pixels[:, 2] = (255.0 * pointcloud[:, 3]).astype(np.uint8) 206 | 207 | # Create the frame for the bird eye view 208 | # Note: the "+1" to estimate the width and height of the image is 209 | # to count for the (0, 0) position in the center of the pointcloud 210 | img_width: int = 1 + int((srange[1] - srange[0])/resolution) 211 | img_height: int = 1 + int((frange[1] - frange[0])/resolution) 212 | bev_img = np.zeros([img_height, img_width, 3], dtype=np.uint8) 213 | 214 | # Set the height information in the created image frame 215 | bev_img[yimg, ximg] = pixels 216 | 217 | plt.imshow(bev_img) 218 | plt.title(f"Heatmap BEV - ~{resolution:.4} m/pixel") 219 | plt.show() 220 | 221 | def _polar_to_cartesian(self, r_idx: int, 222 | az_idx: int, el_idx: int) -> np.array: 223 | """Convert polar coordinate to catesian coordinate. 224 | 225 | Example: 226 | self._polar_to_cartesian(range_idx - 1, az_idx - 1, el_idx - 1) 227 | """ 228 | _range_bin_width: float = self.calibration.heatmap.range_bin_width 229 | _el_bin: float = self.calibration.heatmap.elevation_bins[el_idx] 230 | _az_bin: float = self.calibration.heatmap.azimuth_bins[az_idx] 231 | 232 | point = np.zeros(3) 233 | point[0] = r_idx * _range_bin_width * np.cos(_el_bin) * np.cos(_az_bin) 234 | point[1] = r_idx * _range_bin_width * np.cos(_el_bin) * np.sin(_az_bin) 235 | point[2] = r_idx * _range_bin_width * np.sin(_el_bin) 236 | return point 237 | 238 | def _to_cartesian(self, hmap: np.array) -> np.array: 239 | """Convert polar coordinate heatmap to catesian coordinate. 
240 | 241 | Argument: 242 | hmap: The heatmap of shape (-1, 5) 243 | Structure (columns): 244 | [0]: Azimuth 245 | [1]: Range 246 | [2]: Elevation 247 | [3]: Velocity 248 | [4]: Intensity of reflection in dB 249 | 250 | @see: showHeatmapFromRaw 251 | 252 | Example: 253 | self._to_cartesian(hmap) 254 | """ 255 | pcld = np.zeros(hmap.shape) 256 | pcld[:, 0] = hmap[:, 1] * np.cos(hmap[:, 2]) * np.sin(hmap[:, 0]) 257 | pcld[:, 1] = hmap[:, 1] * np.cos(hmap[:, 2]) * np.cos(hmap[:, 0]) 258 | pcld[:, 2] = hmap[:, 1] * np.sin(hmap[:, 2]) 259 | pcld[:, 3:] = hmap[:, 3:] 260 | return pcld 261 | 262 | def _heatmap_to_pointcloud(self, threshold: float = 0.15, 263 | no_sidelobe: bool = False) -> np.array: 264 | """Compute pointcloud from heatmap. 265 | 266 | The recordings of the heatmap are stored in the polar form. 267 | This function prepares a pointcloud in the cartesian coordinate 268 | system based the heatmap 269 | 270 | Argument: 271 | threshold (float): Threshold to filter the pointcloud 272 | no_sidelobe: Flag used to skip the close range data from rendering 273 | in order to avoid side lobes 274 | 275 | Return 276 | pcl (np.array): the heatmap point locations 277 | """ 278 | _num_el_bin: int = self.calibration.heatmap.num_elevation_bins 279 | _num_az_bin: int = self.calibration.heatmap.num_azimuth_bins 280 | _num_r_num: int = self.calibration.heatmap.num_range_bins 281 | 282 | _range_bin_width: float = self.calibration.heatmap.range_bin_width 283 | 284 | # transform range-azimuth-elevation heatmap to pointcloud 285 | pcl = np.zeros((_num_el_bin, _num_az_bin, _num_r_num, 5)) 286 | 287 | # Range offset to count for the side lobes of the radar self.sensor 288 | roffset: int = 5 289 | 290 | if not no_sidelobe: 291 | roffset = 0 292 | 293 | for range_idx in range(roffset, _num_r_num - 1): 294 | for az_idx in range(_num_az_bin - 1): 295 | for el_idx in range(_num_el_bin - 1): 296 | _el_bin: float = self.calibration.heatmap.elevation_bins[ 297 | el_idx 298 | ] 299 | _az_bin: float = self.calibration.heatmap.azimuth_bins[ 300 | az_idx 301 | ] 302 | pcl[el_idx + 1, az_idx + 1, range_idx + 1, :3] = np.array([ 303 | _az_bin, # Azimuth 304 | (range_idx) * _range_bin_width, # Range 305 | _el_bin, # Elevation 306 | ]) 307 | 308 | pcl = pcl.reshape(-1,5) 309 | pcl[:,3:] = self.heatmap.reshape(-1, 2) 310 | # Normalise the radar reflection intensity 311 | pcl[:, 3] -= np.min(pcl[:, 3]) 312 | pcl[:, 3] /= np.max(pcl[:, 3]) 313 | if threshold: 314 | pcl = pcl[pcl[:, 3] > threshold] 315 | # Re-Normalise the radar reflection intensity after filtering 316 | pcl[:, 3] -= np.min(pcl[:, 3]) 317 | pcl[:, 3] /= np.max(pcl[:, 3]) 318 | return pcl 319 | 320 | def _music(self) -> None: 321 | """Apply MUSIC algorithm for DoA estimation.""" 322 | # Calibrate raw data 323 | adc_samples = self.raw 324 | 325 | # ADC sampling frequency 326 | fs: float = self.calibration.waveform.adc_sample_frequency 327 | 328 | # Frequency slope 329 | fslope: float = self.calibration.waveform.frequency_slope 330 | 331 | # Start frequency 332 | fstart: float = self.calibration.waveform.start_frequency 333 | 334 | # Ramp end time 335 | te: float = self.calibration.waveform.ramp_end_time 336 | 337 | # Chirp time 338 | tc: float = self.calibration.waveform.idle_time + te 339 | 340 | Na = 64 341 | Ne = 64 342 | 343 | if self.sensor != "scradar": 344 | adc_samples *= self.calibration.get_frequency_calibration() 345 | adc_samples *= self.calibration.get_phase_calibration() 346 | 347 | ntx, nrx, nc, ns = adc_samples.shape 348 | 349 | rfft = 
np.fft.fft(adc_samples, ns, -1) - self.calibration.get_coupling_calibration() 350 | dfft = np.fft.fft(rfft, nc, -2) 351 | dfft = np.fft.fftshift(dfft, -2) 352 | dfft = dfft.reshape(ntx * nrx, nc, ns) 353 | 354 | # signal = np.sum(dfft, (1, 2)) 355 | # print("signal shape: ", signal.shape) 356 | 357 | vbins = rdsp.get_velocity_bins(ntx, nc, fstart, tc) 358 | rbins = rdsp.get_range_bins(ns, fs, fslope) 359 | 360 | # Azimuth bins 361 | ares = np.pi / Na 362 | abins = np.arange(-np.pi/2, np.pi/2, ares) 363 | # Elevation 364 | eres = np.pi / Ne 365 | ebins = np.arange(-np.pi/2, np.pi/2, eres) 366 | 367 | spectrum = np.zeros((ns, Ne, Na, 1), dtype=np.complex128) 368 | 369 | signal = np.sum(dfft, (1, 2)) 370 | 371 | spectrum = rdsp.music( 372 | signal, self.calibration.antenna.txl, self.calibration.antenna.rxl, abins, ebins 373 | ) 374 | ''' 375 | hmap = np.zeros((Na * Ne, 3)) 376 | 377 | for eidx in range(Ne): 378 | for aidx in range(Na): 379 | hmap_idx: int = aidx + Na * eidx 380 | hmap[hmap_idx] = np.array([ 381 | abins[aidx], 382 | ebins[eidx], 383 | spectrum[hmap_idx], 384 | ]) 385 | ''' 386 | 387 | # ax = plt.axes(projection="3d") 388 | fig = plt.figure() 389 | ax = fig.gca(projection="3d") 390 | # _, ax = plt.subplots(subplot_kw={"projection": "3d"}) 391 | ax.set_title("Test MUSIC") 392 | ax.set_xlabel("Azimuth") 393 | ax.set_ylabel("Elevation") 394 | ax.set_zlabel("Gain") 395 | el, az = np.meshgrid(ebins, abins) 396 | ''' 397 | map = ax.scatter( 398 | hmap[:, 0], 399 | hmap[:, 1], 400 | hmap[:, 2], 401 | c=hmap[:, 2], 402 | cmap=plt.cm.get_cmap() 403 | ) 404 | ''' 405 | surf = ax.plot_surface( 406 | el, az, spectrum.reshape(Ne, Na), 407 | cmap="coolwarm", 408 | rstride=1, 409 | cstride=1, 410 | alpha=None, 411 | # linewidth=0, 412 | # antialiased=False 413 | ) 414 | plt.colorbar(surf, shrink=0.5, aspect=1) 415 | plt.show() 416 | 417 | def _calibrate(self) -> np.array: 418 | """Handle the calibration of raw ADC samples. 419 | 420 | Return: 421 | Calibrated data based on the radar sensor's type 422 | 423 | NOTE: Only the casacde chip radar sensor has the frequency and phase 424 | calibration 425 | """ 426 | if self.raw is None: 427 | exit(1) 428 | adc_samples = self.raw 429 | 430 | # Remove DC bias 431 | adc_samples -= np.mean(adc_samples) 432 | 433 | if self.sensor != "scradar": 434 | adc_samples *= self.calibration.get_frequency_calibration() 435 | adc_samples *= self.calibration.get_phase_calibration() 436 | return adc_samples 437 | 438 | def _get_fft_size(self, ne: Optional[int], na: Optional[int], 439 | nc: Optional[int], ns: Optional[int]) -> tuple[int, int, int, int]: 440 | """Get optimal FFT size. 441 | 442 | Arguments: 443 | ne: Size of the elevation axis of the data cube 444 | na: Size of the azimuth axis of the data cube 445 | nc: Number of chirp loops 446 | ns: Number of samples per chirp 447 | 448 | Return: 449 | Tuple of the optimal size of each parameter provided in argument 450 | in the exact same order. 
451 | """ 452 | # Estimated size of the elevation and azimuth 453 | if ne is not None: 454 | ne = rdsp.fft_size(ne) 455 | ne = ( 456 | ne if ne > NUMBER_ELEVATION_BINS_MIN else NUMBER_ELEVATION_BINS_MIN 457 | ) 458 | 459 | if na is not None: 460 | na = rdsp.fft_size(na) 461 | na = na if na > NUMBER_AZIMUTH_BINS_MIN else NUMBER_AZIMUTH_BINS_MIN 462 | 463 | if nc is not None: 464 | # Size of doppler FFT 465 | nc = rdsp.fft_size(nc) 466 | nc = nc if nc > NUMBER_DOPPLER_BINS_MIN else NUMBER_DOPPLER_BINS_MIN 467 | if ns is not None: 468 | # Size of range FFT 469 | ns = rdsp.fft_size(ns) 470 | ns = ns if ns > NUMBER_RANGE_BINS_MIN else NUMBER_RANGE_BINS_MIN 471 | 472 | return ne, na, nc, ns 473 | 474 | def _get_bins(self, ns: Optional[int], nc: Optional[int], na: Optional[int], 475 | ne: Optional[int]) ->tuple[np.array, np.array, np.array, np.array]: 476 | """Return the range, velocity, azimuth and elevation bins. 477 | 478 | Arguments: 479 | ne: Elevation FFT size 480 | na: Azimuth FFT size 481 | nc: Doppler FFT size 482 | ns: Range FFT size 483 | 484 | Return: 485 | range bins 486 | velocity bins 487 | azimuth bins 488 | elevation bins 489 | 490 | NOTE: The bins are returned in the order listed above 491 | """ 492 | # Number of TX antenna 493 | ntx: int = self.calibration.antenna.num_tx 494 | 495 | # ADC sampling frequency 496 | fs: float = self.calibration.waveform.adc_sample_frequency 497 | 498 | # Frequency slope 499 | fslope: float = self.calibration.waveform.frequency_slope 500 | 501 | # Start frequency 502 | fstart: float = self.calibration.waveform.start_frequency 503 | 504 | # Ramp end time 505 | te: float = self.calibration.waveform.ramp_end_time 506 | 507 | # Chirp time 508 | tc: float = self.calibration.waveform.idle_time + te 509 | 510 | rbins = np.array([]) # Range bins 511 | vbins = np.array([]) # Doppler bins 512 | abins = np.array([]) # Azimuth bins 513 | ebins = np.array([]) # Elevation bins 514 | 515 | if ns: 516 | rbins = rdsp.get_range_bins(ns, fs, fslope) 517 | 518 | if nc: 519 | # Velocity bins 520 | vbins = rdsp.get_velocity_bins(ntx, nc, fstart, tc) 521 | 522 | if na: 523 | # Azimuth bins 524 | ares = 2 * self.AZIMUTH_FOV / na 525 | # Estimate azimuth angles and flip the azimuth axis 526 | abins = -1 * np.arcsin( 527 | np.arange(-self.AZIMUTH_FOV, self.AZIMUTH_FOV, ares) / ( 528 | 2 * np.pi * self.calibration.d 529 | ) 530 | ) 531 | 532 | if ne: 533 | # Elevation 534 | eres = 2 * self.ELEVATION_FOV / ne 535 | # Estimate elevation angles and flip the elevation axis 536 | ebins = -1 * np.arcsin( 537 | np.arange(-self.ELEVATION_FOV, self.ELEVATION_FOV, eres) / ( 538 | 2 * np.pi * self.calibration.d 539 | ) 540 | ) 541 | return rbins, vbins, abins, ebins 542 | 543 | def _pre_process(self, adc_samples: np.array) -> tuple[np.array, np.array]: 544 | """Pre processing of ADC samples. 545 | 546 | The pre-processing step helps in reshaping the data so to match 547 | the antenna layout of the radar sensor. Some paddings are also 548 | added if required in order to have a minimum of pre-defined 549 | frequency bins during FFT processing. 550 | 551 | Since the number of antenna and device configuration can vary 552 | from one board or recording to another, 553 | it's good to define a minimum size for the doppler, azimuth, and 554 | elevation FFT processing (See `config.py` for the default values). 555 | 556 | This does not affect the resolution of the radar sensor but only 557 | the 3D rendeing. 
558 | 559 | Argument: 560 | adc_samples: Calibrated ADC samples 561 | 562 | Return (tuple): 563 | virtual_array: 4D data cube ready for FFT processing 564 | """ 565 | virtual_array = rdsp.virtual_array( 566 | adc_samples, 567 | self.calibration.antenna.txl, 568 | self.calibration.antenna.rxl, 569 | ) 570 | 571 | # va_nel: Number of elevations in the virtual array 572 | # va_naz: Number of azimuth in the virtual array 573 | # va_nc: Number of chirp per antenna in the virtual array 574 | # va_ns: Number of samples per chirp 575 | va_nel, va_naz, va_nc, va_ns = virtual_array.shape 576 | 577 | Ne, Na, Nc, Ns = self._get_fft_size(*virtual_array.shape) 578 | virtual_array = np.pad( 579 | virtual_array, 580 | ( 581 | (0, Ne - va_nel), (0, Na - va_naz), 582 | (0, Nc - va_nc), (0, Ns - va_ns) 583 | ), 584 | "constant", 585 | constant_values=((0, 0), (0, 0), (0, 0), (0, 0)) 586 | ) 587 | return virtual_array 588 | 589 | def _td_processing(self): 590 | """Time Domain Processing.""" 591 | # Calibrated ADC samples 592 | adc_samples = self._calibrate() 593 | ntx, nrx, nc, ns = adc_samples.shape 594 | 595 | # Range, Doppler, Azimuth and Elevation bins 596 | rbins, vbins, abins, ebins = self._get_bins(ns, nc, nrx, ntx) 597 | 598 | fslope = self.calibration.waveform.frequency_slope 599 | fstart: float = self.calibration.waveform.start_frequency 600 | fsample = self.calibration.waveform.adc_sample_frequency 601 | 602 | # Ramp end time 603 | te: float = self.calibration.waveform.ramp_end_time 604 | 605 | # Chirp time 606 | tc: float = self.calibration.waveform.idle_time + te 607 | 608 | # Maximum range 609 | rmax: int = rdsp.get_max_range(fsample, fslope) 610 | 611 | srange = np.sum(adc_samples, (0, 1, 2)) 612 | amps = np.abs(srange) 613 | angles = np.angle(srange) 614 | freq = np.pi * np.sin(angles) # / (2 *) 615 | rg = freq * rdsp.C / (2 * fslope) 616 | plt.scatter(rg, amps) 617 | plt.show() 618 | exit(0) 619 | 620 | def _fesprit(self): 621 | """FESPRIT. 622 | 623 | A combination of FFT and ESPRIT based radar signal processing. 624 | The range, doppler, azimuth and elevation are all based on esprit while 625 | FFT is used to produce the intermediate signals to use for the processing. 626 | Unlike FFT (having sidelobes), the frequency estimation of ESPRIT is more 627 | precise and makes much of the ADC samples usable. 
628 | 629 | NOTE: The current implementation is probably not much optimized and should 630 | be improved 631 | """ 632 | # Calibrated ADC samples 633 | adc_samples = self._calibrate() 634 | ntx, nrx, nc, ns = adc_samples.shape 635 | 636 | C: float = rdsp.C # Speed of light 637 | fslope = self.calibration.waveform.frequency_slope 638 | fstart = self.calibration.waveform.start_frequency 639 | fsample = self.calibration.waveform.adc_sample_frequency 640 | # Ramp end time 641 | te: float = self.calibration.waveform.ramp_end_time 642 | 643 | # Chirp time 644 | tc: float = self.calibration.waveform.idle_time + te 645 | 646 | # Maximum range 647 | rmax: int = rdsp.get_max_range(fsample, fslope) 648 | # Maximum velocity 649 | vmax: int = rdsp.get_max_velocity(ntx, fstart, tc) 650 | 651 | # Range-Doppler FFT 652 | rfft = np.fft.fft(adc_samples, ns, -1) 653 | rfft -= self.calibration.get_coupling_calibration() 654 | 655 | dfft = np.fft.fft(rfft, nc, -2) 656 | dfft = np.fft.fftshift(dfft, -2) 657 | vcomp = rdsp.velocity_compensation(ntx, nc) 658 | dfft *= vcomp 659 | 660 | __gain = np.abs(np.sum(dfft, (0, 1))) 661 | __noise = np.quantile(__gain, 0.10, (0, 1)) 662 | __snr = 10 * np.log10((__gain / __noise) + 1) 663 | 664 | # Range estimation with ESPRIT 665 | radc = np.sum(adc_samples, (0, 1, 2)) 666 | resp = rdsp.esprit(radc, ns, ns) 667 | _r = (fsample * (np.angle(resp) + np.pi ) * C) / (4 * np.pi * fslope) 668 | ridx = (_r * ns / rmax).astype(np.int16) - 1 669 | 670 | # Reshape the Range-FFT according to the virtual antenna layout 671 | rva = rdsp.virtual_array( 672 | rfft, 673 | self.calibration.antenna.txl, 674 | self.calibration.antenna.rxl, 675 | ) 676 | # Reshape the Doppler-FFT according to the virtual antenna layout 677 | va = rdsp.virtual_array( 678 | dfft, 679 | self.calibration.antenna.txl, 680 | self.calibration.antenna.rxl, 681 | ) 682 | va_ne, va_na, va_nc, _ = va.shape 683 | __pcl = [] 684 | 685 | for idx, _ridx in enumerate(ridx): 686 | # Azimuth estimation 687 | sample = np.sum(va[:, :, :, _ridx], (0, 2)) 688 | azesp = rdsp.esprit(sample, va_na, 1) 689 | _az = np.arcsin(np.angle(azesp) / np.pi) 690 | aidx = np.abs(_az[0] * va_na / self.AZIMUTH_FOV).astype(np.int16) - 1 691 | 692 | # Elevation estimation 693 | esample = np.sum(va[:, aidx, :, _ridx], 1) 694 | elesp = rdsp.esprit(esample, va_ne, 1) 695 | _el = np.arcsin(np.angle(elesp) / (2.8 * np.pi)) 696 | eidx = np.abs(_el[0] * va_ne / self.ELEVATION_FOV).astype(np.int16) - 1 697 | 698 | # Doppler velocity estimation 699 | vsample = rva[eidx, aidx, :, _ridx] 700 | vesp = rdsp.esprit(vsample, va_nc, 1) 701 | _v = (C/fstart) * np.angle(vesp) / (4 * np.pi * ntx * tc) 702 | vidx = np.abs(_v * nc / vmax).astype(np.int16) - 1 703 | 704 | __pcl.append(np.array([ 705 | _az[0], # Azimuth 706 | _r[idx], # Range 707 | _el[0], # Elevation 708 | _v[0], # Radial-Velocity 709 | __snr[vidx, idx], # Gain 710 | ], dtype=np.float32)) 711 | return np.array(__pcl) 712 | 713 | def _process_raw_adc(self) -> np.array: 714 | """Radar Signal Processing on raw ADC data. 715 | 716 | FFT Signal processing is applied on the raw Radar ADC samples. 717 | As a result, we get the Range, Doppler and Angle estimation of 718 | targets detected by the radar. 719 | 720 | NOTE: The Angle estimation based on FFT doesn't provide high accurary. 721 | Thus, more advanced methods like MUSIC or ESPRIT could be implemented 722 | in trade with computational time. 
723 | """ 724 | # Calibrate raw data 725 | adc_samples = self._calibrate() 726 | ntx, nrx, nc, ns = adc_samples.shape 727 | adc_samples *= np.blackman(ns).reshape(1, 1, 1, -1) 728 | 729 | # Nc: Number of chirp per antenna in the virtual array 730 | # Ns: Number of samples per chirp 731 | _, _, Nc, Ns = self._get_fft_size(None, None, nc, ns) 732 | 733 | # Range-FFT 734 | rfft = np.fft.fft(adc_samples, Ns, -1) 735 | rfft -= self.calibration.get_coupling_calibration() 736 | 737 | # Doppler-FFT 738 | dfft = np.fft.fft(rfft, Nc, -2) 739 | dfft = np.fft.fftshift(dfft, -2) 740 | vcomp = rdsp.velocity_compensation(ntx, Nc) 741 | dfft *= vcomp 742 | 743 | _dfft = self._pre_process(dfft) 744 | 745 | # Ne: Number of elevations in the virtual array 746 | # Na: Number of azimuth in the virtual array 747 | Ne, Na, _, _ = self._get_fft_size(*_dfft.shape) 748 | 749 | # Azimuth estimation 750 | afft = np.fft.fft(_dfft, Na, 1) 751 | afft = np.fft.fftshift(afft, 1) 752 | 753 | # Elevation esitamtion 754 | efft = np.fft.fft(afft, Ne, 0) 755 | efft = np.fft.fftshift(efft, 0) 756 | 757 | # Return the signal power 758 | return np.abs(efft) ** 2 759 | 760 | def _generate_radar_pcl(self) -> np.array: 761 | """Generate point cloud.""" 762 | # Calibrated raw data 763 | adc_samples = self._calibrate() 764 | ntx: int = self.calibration.waveform.num_tx 765 | 766 | # ntx: Number of TX antenna 767 | # nrx: Number of RX antenna 768 | # nc: Number of chirp per antenna in the virtual array 769 | # ns: Number of samples per chirp 770 | ntx, nrx, nc, ns = adc_samples.shape 771 | _, _, Nc, Ns = self._get_fft_size(None, None, nc, ns) 772 | 773 | vcomp = rdsp.velocity_compensation(ntx, Nc) 774 | 775 | # Coupling calibration 776 | ccalib = self.calibration.get_coupling_calibration() 777 | # Doppler-velocity induced phase shift compensation matrix 778 | vcomp = rdsp.velocity_compensation(ntx, Nc) 779 | 780 | # Range-FFT 781 | samples = adc_samples * np.blackman(ns).reshape(1, 1, 1, -1) 782 | rfft = np.fft.fft(samples, Ns, -1) 783 | rfft -= ccalib 784 | # Doppler-FFT 785 | dfft = np.fft.fft(rfft, Nc, -2) 786 | dfft = np.fft.fftshift(dfft, -2) 787 | dfft *= vcomp 788 | 789 | mimo_dfft = dfft.reshape(ntx * nrx, Nc, Ns) 790 | mimo_dfft = np.sum(np.abs(mimo_dfft) ** 2, 0) 791 | 792 | # OS-CFAR for object detection 793 | _, detections = rdsp.nq_cfar_2d( 794 | mimo_dfft, 795 | RD_OS_CFAR_WS, 796 | RD_OS_CFAR_GS, 797 | RD_OS_CFAR_K, 798 | RD_OS_CFAR_TOS, 799 | ) 800 | 801 | va = rdsp.virtual_array( 802 | dfft, 803 | self.calibration.antenna.txl, 804 | self.calibration.antenna.rxl, 805 | ) 806 | va_nel, va_naz, va_nc, va_ns = va.shape 807 | 808 | Ne, Na, Nc, Ns = self._get_fft_size(*va.shape) 809 | 810 | va = np.pad( 811 | va, 812 | ( 813 | (0, Ne - va_nel), (0, Na - va_naz), 814 | (0, Nc - va_nc), (0, Ns - va_ns) 815 | ), 816 | "constant", 817 | constant_values=((0, 0), (0, 0), (0, 0), (0, 0)) 818 | ) 819 | 820 | # Range, doppler, azimuth and elevation bins 821 | rbins, vbins, abins, ebins = self._get_bins(Ns, Nc, Na, Ne) 822 | 823 | pcl = [] 824 | 825 | for idx, obj in enumerate(detections): 826 | obj.range = rbins[obj.ridx] 827 | obj.velocity = vbins[obj.vidx] 828 | 829 | if DOA_METHOD == "esprit": 830 | # Azimuth estimation (With ESPRIT) 831 | __az = rdsp.esprit(np.sum(va[:, :, obj.vidx, obj.ridx], 0), Na, 1) 832 | __az = np.arcsin(np.angle(__az) / np.pi) 833 | 834 | for _az in __az: 835 | obj.az = _az 836 | 837 | # Azimuth bin 838 | azidx = int((obj.az * Na/np.pi) + Na//2) 839 | 840 | # Elevation estimation (with ESPRIT) 841 | __el = 
rdsp.esprit(va[:, azidx, obj.vidx, obj.ridx], Ne, 3) 842 | """ 843 | NOTE: 844 | Since the vertical field of view is narrower and has a sparse 845 | minimum redundancy layout, the value "2.8" has been empirically 846 | established, and is equivalent to the 1/2 wavelength spacing between 847 | the antenna elements in the elevation direction. 848 | 849 | d = (2.8) x 1/2 lambda 850 | 851 | METHOD: To define the value, the height of a known object has been 852 | recorded and used to tune the inter-element spacing of the antenna 853 | in the elevation direction. 854 | 855 | The value -pi/2 is subtracted from the estimated angular frequency to 856 | account for negative elevations. It's equivalent to the FFT-shift when 857 | performing FFT processing. 858 | """ 859 | __el = np.arcsin((np.angle(__el) - np.pi/2) / (2.8 * np.pi)) 860 | for _el in __el: 861 | obj.el = _el 862 | pcl.append(np.array([ 863 | obj.az, # Azimuth 864 | obj.range, # Range 865 | obj.el, # Elevation 866 | obj.velocity, # Velocity 867 | 10 * np.log10(obj.snr) # SNR 868 | ])) 869 | 870 | elif DOA_METHOD == "fft": 871 | afft = np.fft.fft(va[:, :, obj.vidx, obj.ridx], Na, 1) 872 | afft = np.fft.fftshift(afft, 1) 873 | mask = rdsp.os_cfar( 874 | np.abs(np.sum(afft, 0)).reshape(-1), 875 | self.AZ_OS_CFAR_WS, 876 | self.AZ_OS_CFAR_GS, 877 | self.AZ_OS_CFAR_TOS, 878 | ) 879 | _az = np.argwhere(mask == 1).reshape(-1) 880 | for _t in _az: 881 | efft = np.fft.fft(afft[:, _t], Ne, 0) 882 | efft = np.fft.fftshift(efft, 0) 883 | _el = np.argmax(efft) 884 | obj.az = abins[_t] 885 | obj.el = ebins[_el] 886 | 887 | pcl.append(np.array([ 888 | obj.az, # Azimuth 889 | obj.range, # Range 890 | obj.el, # Elevation 891 | obj.velocity, # Velocity 892 | 10 * np.log10(obj.snr) # SNR 893 | ])) 894 | return np.array(pcl) 895 | 896 | def getPointcloudFromRaw(self, polar: bool = False) -> np.array: 897 | """Return the post-processed radar pointcloud. 898 | 899 | Return: 900 | Pointcloud in the following format: 901 | [0]: Azimuth 902 | [1]: Range 903 | [2]: Elevation 904 | [3]: Velocity 905 | [4]: Intensity of reflection in dB or SNR 906 | """ 907 | # ADC sampling frequency 908 | fs: float = self.calibration.waveform.adc_sample_frequency 909 | 910 | # Frequency slope 911 | fslope: float = self.calibration.waveform.frequency_slope 912 | 913 | # Maximum range 914 | rmax: float = rdsp.get_max_range(fs, fslope) 915 | 916 | if RDSP_METHOD == "fesprit": 917 | pcl = self._fesprit() 918 | elif RDSP_METHOD == "tdp": 919 | pcl = self._td_processing() 920 | else: 921 | pcl = self._generate_radar_pcl() 922 | # Remove very close range 923 | pcl = pcl[pcl[:, 1] >= 1.5] 924 | 925 | # Exclude all points detected in the last range bins because 926 | # those detections are not reliable 927 | pcl = pcl[pcl[:, 1] < (0.95 * rmax)] 928 | pcl = pcl[pcl[:, 4] > np.max(pcl[:, 4]) * 0.4] 929 | if not polar: 930 | pcl = self._to_cartesian(pcl) 931 | return pcl 932 | 933 | def showPointcloudFromRaw(self, 934 | velocity_view: bool = False, 935 | bird_eye_view: bool = False, polar: bool = False, **kwargs) -> None: 936 | """Render the pointcloud of detected objects from radar signal processing. 
937 | 938 | Arguments: 939 | bird_eye_view: Enable 2D Bird Eye View rendering 940 | """ 941 | # ADC sampling frequency 942 | fs: float = self.calibration.waveform.adc_sample_frequency 943 | 944 | # Frequency slope 945 | fslope: float = self.calibration.waveform.frequency_slope 946 | 947 | # Maximum range 948 | rmax: float = rdsp.get_max_range(fs, fslope) 949 | 950 | # Get pointclouds 951 | pcl = self.getPointcloudFromRaw(polar) 952 | 953 | if bird_eye_view: 954 | ax = plt.axes() 955 | ax.set_title(f"Radar BEV | Frame {self.index:04}") 956 | ax.scatter( 957 | pcl[:, 0], 958 | pcl[:, 1], 959 | pcl[:, 4], 960 | c=pcl[:, 4], 961 | cmap="viridis", 962 | ) 963 | ax.set_xlabel("Azimuth (m)") 964 | ax.set(facecolor="black") 965 | else: 966 | ax = plt.axes(projection="3d") 967 | ax.set_title(f"4D-FFT | Frame {self.index:04}") 968 | ax.set_zlabel("Elevation") 969 | map = ax.scatter( 970 | pcl[:, 0], 971 | pcl[:, 1], 972 | pcl[:, 2], 973 | c=pcl[:, 3] if velocity_view else pcl[:, 4], 974 | cmap=plt.cm.get_cmap(), 975 | s=pcl[:, 4] / 2, # Marker size 976 | ) 977 | plt.colorbar(map, ax=ax) 978 | 979 | ax.set_xlabel("Azimuth") 980 | ax.set_ylabel("Range") 981 | 982 | if polar: 983 | ax.set_xlim(-1, 1) 984 | else: 985 | ax.set_xlim(-rmax, rmax) 986 | ax.set_ylim(0, rmax) 987 | 988 | if kwargs.get("show", True): 989 | plt.show() 990 | 991 | def showHeatmapFromRaw(self, threshold: float, 992 | no_sidelobe: bool = False, 993 | velocity_view: bool = False, 994 | polar: bool = False, 995 | ranges: tuple[Optional[float], Optional[float]] = (None, None), 996 | azimuths: tuple[Optional[float], Optional[float]] = (None, None), 997 | **kwargs, 998 | ) -> None: 999 | """Render the heatmap processed from the raw radar ADC samples. 1000 | 1001 | Argument: 1002 | threshold: Threshold value for filtering the heatmap obtained 1003 | from the radar data processing 1004 | no_sidelobe: Flag used to skip the close range data from rendering 1005 | in order to avoid side lobes: 1006 | 1007 | Consider Range > ~1.5m 1008 | velocity_view: Render the heatmap using the velocity as the fourth 1009 | dimension. When false, the signal gain in dB is used 1010 | instead. 1011 | polar (bool): Flag to indicate that the pointcloud should be rendered 1012 | directly in the polar coordinate. 
When false, the 1013 | heatmap is converted into the cartesian coordinate 1014 | ranges (tuple): Min and max value of the range to render 1015 | Format: (min_range, max_range) 1016 | azimuths (tuple): Min and max value of azimuth to render 1017 | Format: (min_azimuth, max_azimuth) 1018 | kwargs (dict): Optional keyword arguments 1019 | "show": When false, prevent the rendered heatmap to be shown 1020 | """ 1021 | if self.raw is None: 1022 | info("No raw ADC samples available!") 1023 | return None 1024 | 1025 | signal_power = self._process_raw_adc() 1026 | 1027 | # Size of elevation, azimuth, doppler, and range bins 1028 | Ne, Na, Nv, Nr = signal_power.shape 1029 | # Range, Doppler, Azimuth and Elevation bins 1030 | rbins, vbins, abins, ebins = self._get_bins(Nr, Nv, Na, Ne) 1031 | 1032 | # Noise estimation 1033 | noise = np.quantile(signal_power, 0.95, axis=(3, 2, 1, 0), method='weibull') 1034 | dpcl = signal_power / noise 1035 | dpcl = 10 * np.log10(dpcl + 1) 1036 | dpcl /= np.max(dpcl) 1037 | 1038 | hmap = np.zeros((Ne * Na * Nv * Nr, 5)) 1039 | 1040 | # Range offset to count for the side lobes of the radar self.sensor 1041 | roffset: int = 10 1042 | 1043 | if no_sidelobe: 1044 | dpcl[:, :, :, 0:roffset] = 0.0 1045 | 1046 | ''' 1047 | NOTE 1048 | ----- 1049 | The time complexity of the nested loop below is in the order of: 1050 | Ne * Na* * Nv * ns (about N^4) 1051 | Hence it takes eons when higher resolution of rendering is used. 1052 | This motivated the vectorized implementation which is ~25 times faster. 1053 | 1054 | for elidx in range(Ne): 1055 | for aidx in range(Na): 1056 | for vidx in range(Nv): 1057 | for ridx in range(roffset, ns): 1058 | hmap_idx: int = ridx + ns * ( 1059 | vidx + Nv * ( aidx + Na * elidx) 1060 | ) 1061 | hmap[hmap_idx] = np.array([ 1062 | abins[aidx], 1063 | rbins[ridx], 1064 | ebins[elidx], 1065 | vbins[vidx], 1066 | dpcl[elidx, aidx, vidx, ridx], 1067 | ]) 1068 | ''' 1069 | hmap[:, 0] = np.repeat( 1070 | [np.repeat(abins, Nv * Nr)], 1071 | Ne, axis=0 1072 | ).reshape(-1) 1073 | hmap[:, 1] = np.repeat( 1074 | [np.repeat([np.repeat([rbins], Nv, axis=0)], Na, axis=0)], 1075 | Ne, axis=0 1076 | ).reshape(-1) 1077 | hmap[:, 2] = np.repeat(ebins, Na * Nv * Nr) 1078 | hmap[:, 3] = np.repeat( 1079 | [np.repeat([np.repeat(vbins, Nr)], Na, axis=0)], 1080 | Ne, axis=0 1081 | ).reshape(-1) 1082 | hmap[:, 4] = dpcl.reshape(-1) 1083 | 1084 | # Filtering ranges 1085 | min_range, max_range = ranges 1086 | if min_range is not None: 1087 | hmap = hmap[hmap[:, 1] > min_range] 1088 | if max_range is not None: 1089 | hmap = hmap[hmap[:, 1] < max_range] 1090 | 1091 | # Filtering azimuths 1092 | min_az, max_az= azimuths 1093 | if min_az is not None: 1094 | hmap = hmap[hmap[:, 0] > min_az] 1095 | if max_az is not None: 1096 | hmap = hmap[hmap[:, 0] < max_az] 1097 | 1098 | hmap = hmap[hmap[:, 4] > threshold] 1099 | hmap[:, 4] -= np.min(hmap[:, 4]) 1100 | hmap[:, 4] /= np.max(hmap[:, 4]) 1101 | 1102 | if not polar: 1103 | hmap = self._to_cartesian(hmap) 1104 | 1105 | ax = plt.axes(projection="3d") 1106 | ax.set_title("4D-FFT processing of raw ADC samples") 1107 | ax.set_xlabel("Azimuth") 1108 | ax.set_ylabel("Range") 1109 | ax.set_zlabel("Elevation") 1110 | if polar: 1111 | ax.set_xlim(np.min(abins), np.max(abins)) 1112 | else: 1113 | ax.set_xlim(-np.max(rbins), np.max(rbins)) 1114 | ax.set_ylim(np.min(rbins), np.max(rbins)) 1115 | ax.view_init(azim=-30, elev=45) 1116 | map = ax.scatter( 1117 | hmap[:, 0], 1118 | hmap[:, 1], 1119 | hmap[:, 2], 1120 | c=hmap[:, 3] if velocity_view 
else hmap[:, 4], 1121 | cmap=plt.cm.get_cmap(), 1122 | s=5.0, # Marker size 1123 | ) 1124 | plt.colorbar(map, ax=ax) 1125 | if kwargs.get("show", True): 1126 | plt.show() 1127 | 1128 | def show2dHeatmap(self, polar: bool = False, show: bool = True) -> None: 1129 | """Render 2D heatmap. 1130 | 1131 | Argument: 1132 | polar: Flag to indicate that the pointcloud should be rendered 1133 | directly in the polar coordinate. When false, the 1134 | heatmap is converted into the cartesian coordinate 1135 | show: Enable the rendered figure to be shown. 1136 | """ 1137 | if self.raw is None: 1138 | info("No raw ADC samples available!") 1139 | return None 1140 | 1141 | signal_power = self._process_raw_adc() 1142 | 1143 | # Size of elevation, azimuth, doppler, and range bins 1144 | Ne, Na, Nv, Nr = signal_power.shape 1145 | rbins, _, abins, _ = self._get_bins(Nr, None, Na, None) 1146 | 1147 | dpcl = np.sum(signal_power, (0, 2)) 1148 | noise = np.quantile(dpcl, 0.30, (0, 1)) 1149 | dpcl /= noise 1150 | dpcl = 10 * np.log10(dpcl + 1) 1151 | 1152 | # Number of close range bins to skip 1153 | roffset: int = 15 1154 | 1155 | if not polar: 1156 | """ 1157 | hmap = np.zeros((Na * Nr, 3)) 1158 | for aidx, _az in enumerate(abins): 1159 | for ridx, _r in enumerate(rbins[roffset:]): 1160 | hmap[ridx + roffset + Nr * aidx] = np.array([ 1161 | _r * np.sin(_az), # Azimuth 1162 | _r * np.cos(_az), # Range 1163 | dpcl[aidx, ridx + roffset], 1164 | ]) 1165 | 1166 | NOTE: 1167 | The use of kronecker product below is to implement a refactored 1168 | vectorized version of the for loop above. 1169 | """ 1170 | _r = np.kron(rbins[roffset:-roffset], np.cos(abins)) 1171 | _az = np.kron(rbins[roffset:-roffset], np.sin(abins)) 1172 | _pcl = np.transpose(dpcl, (1, 0))[roffset:-roffset, :].reshape(-1) 1173 | ax = plt.axes() 1174 | ax.scatter( 1175 | _az, # hmap[:, 0], 1176 | _r , # hmap[:, 1], 1177 | _pcl, # hmap[:, 2], 1178 | c=_pcl, # hmap[:, 2], 1179 | ) 1180 | ax.set_xlabel("Azimuth (m)") 1181 | ax.set(facecolor="black") 1182 | else: 1183 | dpcl = np.transpose(dpcl, (1, 0)) 1184 | az, rg = np.meshgrid(abins, rbins) 1185 | _, ax = plt.subplots() 1186 | color = ax.pcolormesh(az, rg, dpcl, cmap="viridis") 1187 | ax.set_xlabel("Azimuth (rad)") 1188 | 1189 | ax.set_ylabel("Range (m)") 1190 | ax.set_title(f"Frame {self.index:04}") 1191 | if show: 1192 | plt.show() 1193 | 1194 | 1195 | class CCRadar(SCRadar): 1196 | """Cascade Chip Radar. 1197 | 1198 | Attributes: 1199 | AZ_OS_CFAR_WS: Constant False Alarm Rate Window Size 1200 | AZ_OS_CFAR_GS: Constant False Alarm Rate Guard Cell 1201 | AZ_OS_CFAR_TOS: Constant False Alarm Rate Tos factor 1202 | """ 1203 | # 1D OS-CFAR Parameters used for peak selection in Azimuth-FFT 1204 | AZ_OS_CFAR_WS: int = 16 # Window size 1205 | AZ_OS_CFAR_GS: int = 8 # Guard cell 1206 | AZ_OS_CFAR_TOS: int = 4 # Tos factor 1207 | 1208 | AZIMUTH_FOV: float = np.deg2rad(180) 1209 | ELEVATION_FOV: float = np.deg2rad(20) 1210 | 1211 | 1212 | def _phase_calibration(self) -> np.array: 1213 | """Apply Phase calibration. 1214 | 1215 | Return: 1216 | Phase calibrated ADC samples 1217 | """ 1218 | # Phase calibrationm atrix 1219 | pm = np.array(self.calibration.phase.phase_calibration_matrix) 1220 | pm = pm[0] / pm 1221 | phase_calibrated_mtx = self.raw * pm.reshape( 1222 | self.calibration.phase.num_tx, 1223 | self.calibration.phase.num_rx, 1224 | 1, 1225 | 1 1226 | ) 1227 | return phase_calibrated_mtx 1228 | --------------------------------------------------------------------------------
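Usage sketch. The flags below are taken from the argparse definitions in coloradar.py; the sub-dataset codename and frame index are placeholders only, since the actual codenames come from dataset/dataset.json and can be listed with the first command.

    python coloradar.py --overview --codename
    python coloradar.py -d <codename> -i 10 --lidar -bev -r 0.05 --width 40 --height 40
    python coloradar.py -d <codename> -i 10 --ccradar --raw --heatmap-2d --polar

The second command renders the lidar bird eye view of frame 10 at 0.05 m/pixel over a 40 m x 40 m window; the third processes the cascade chip radar raw ADC samples of the same frame and shows the resulting 2D range-azimuth heatmap in polar coordinates.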
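The helpers in core/utils/radardsp.py can also be exercised on their own. The following is a minimal sketch assuming made-up waveform parameters (fs, fslope, fstart, tc and the array sizes are placeholder values; the real ones are read from the sensor calibration files handled by core/calibration.py):

    import numpy as np
    from core.utils import radardsp as rdsp

    # Placeholder waveform parameters -- not taken from any calibration file
    fs = 8e6          # ADC sampling frequency (Hz)
    fslope = 80e12    # Chirp frequency slope (Hz/s)
    fstart = 77e9     # Chirp start frequency (Hz)
    tc = 60e-6        # Chirp time: idle time + ramp end time (s)
    ntx, nc, ns = 3, 128, 256

    print("max range [m]:", rdsp.get_max_range(fs, fslope))
    print("range resolution [m]:", rdsp.get_range_resolution(ns, fs, fslope))
    print("velocity resolution [m/s]:", rdsp.get_velocity_resolution(nc, fstart, tc))

    # Bins used for rendering (sizes rounded up to a power of two, as in radar.py)
    rbins = rdsp.get_range_bins(rdsp.fft_size(ns), fs, fslope)
    vbins = rdsp.get_velocity_bins(ntx, rdsp.fft_size(nc), fstart, tc)

    # 1D OS-CFAR on a synthetic range profile with two artificial peaks
    profile = np.abs(np.random.randn(len(rbins)))
    profile[[40, 90]] += 20.0
    mask = rdsp.os_cfar(profile, ws=16, ngc=2, tos=8)
    print("detected range bins:", np.argwhere(mask == 1).reshape(-1))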