├── .gitignore
├── DSP.py
├── LICENSE
├── README.md
├── STL3D
├── GestureradarV1.2.SLDPRT
└── GestureradarV1.2.STL
├── UI_interface.py
├── colortrans.py
├── config
├── IWR1843_cfg.cfg
└── IWR6843_cfg.cfg
├── dsp
├── ZoomFFT.py
├── __init__.py
├── angle_estimation.py
├── cfar.py
├── compensation.py
├── doppler_processing.py
├── music.py
├── noise_removal.py
├── range_processing.py
└── utils.py
├── firmware
├── xwr1843_mmw_demo.bin
├── xwr6443_mmw_demo.bin
├── xwr6843AOP_mmw_demo.bin
├── xwr6843ISK_mmw_demo.bin
└── xwr6843ODS_mmw_demo.bin
├── gesture_icons
├── 0.jpg
├── 1.jpg
├── 2.jpg
├── 3.jpg
├── 4.jpg
├── 5.jpg
├── 6.jpg
└── 7.jpg
├── globalvar.py
├── img
├── 1.png
├── 2.png
├── 3.png
├── 4.gif
├── 5.mp4
├── 6.jpg
└── 7.jpg
├── iwr6843_tlv
└── detected_points.py
├── libs
├── UDPCAPTUREADCRAWDATA.dll
├── UDPCAPTUREADCRAWDATA.lib
└── libtest.so
├── main.py
├── radar_config.py
└── real_time_process.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 |
7 | # Distribution / packaging
8 | .Python
9 | build/
10 | develop-eggs/
11 | dist/
12 | downloads/
13 | eggs/
14 | .eggs/
15 | lib/
16 | lib64/
17 | parts/
18 | sdist/
19 | var/
20 | wheels/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
24 |
25 | # PyInstaller
26 | # Usually these files are written by a PyInstaller build script
27 | *.manifest
28 | *.spec
29 |
30 | # Installer logs
31 | pip-log.txt
32 | pip-delete-this-directory.txt
33 |
34 | # Unit test / coverage reports
35 | htmlcov/
36 | .tox/
37 | .coverage
38 | .coverage.*
39 | .cache
40 | nosetests.xml
41 | coverage.xml
42 | *.cover
43 | .hypothesis/
44 | .pytest_cache/
45 |
46 | # Translations
47 | *.mo
48 | *.pot
49 |
50 | # Django stuff:
51 | *.log
52 | local_settings.py
53 | db.sqlite3
54 |
55 | # Flask stuff:
56 | instance/
57 | .webassets-cache
58 |
59 | # Scrapy stuff:
60 | .scrapy
61 |
62 | # Sphinx documentation
63 | docs/_build/
64 |
65 | # PyBuilder
66 | target/
67 |
68 | # Jupyter Notebook
69 | .ipynb_checkpoints
70 |
71 | # pyenv
72 | .python-version
73 |
74 | # celery beat schedule file
75 | celerybeat-schedule
76 |
77 | # SageMath parsed files
78 | *.sage.py
79 |
80 | # Environments
81 | .env
82 | .venv
83 | env/
84 | venv/
85 | ENV/
86 | env.bak/
87 | venv.bak/
88 |
89 | # Spyder project settings
90 | .spyderproject
91 | .spyproject
92 |
93 | # Rope project settings
94 | .ropeproject
95 |
96 | # mkdocs documentation
97 | /site
98 |
99 | # mypy
100 | .mypy_cache/
101 |
102 | # PyCharm
103 | .idea/
104 | *.iml
105 | *.iws
106 | *.ipr
107 | *.iws
108 | .idea_modules/
109 |
110 | # VS Code
111 | .vscode/
112 | *.code-workspace
113 |
114 | # Project specific
115 | dataset/
116 | visualization/
117 | *.npy
118 | *.pth
119 | *.pt
120 | *.pkl
121 | *.h5
122 | *.hdf5
123 | *.model
124 | # *.bin
125 |
126 | # Windows specific
127 | Thumbs.db
128 | ehthumbs.db
129 | Desktop.ini
130 |
131 | # macOS specific
132 | .DS_Store
133 | .AppleDouble
134 | .LSOverride
135 | Icon
136 | ._*
137 | .DocumentRevisions-V100
138 | .fseventsd
139 | .Spotlight-V100
140 | .TemporaryItems
141 | .Trashes
142 | .VolumeIcon.icns
143 | .com.apple.timemachine.donotpresent
144 |
--------------------------------------------------------------------------------
/DSP.py:
--------------------------------------------------------------------------------
1 | # version: 1.0
2 |
3 | import numpy as np
4 | from collections import deque
5 | import dsp
6 | from dsp.doppler_processing import doppler_processing
7 | import dsp.range_processing as range_processing
8 | import dsp.angle_estimation as Angle_dsp
9 | import dsp.utils as utils
10 | import dsp.compensation as Compensation
11 | from dsp.utils import Window
12 | import globalvar as gl
13 |
14 |
# Rolling history buffers: each keeps the most recent 12 processed frames so
# the UI can display a short time window of each feature.
rti_queue = deque(maxlen=12)  # range-time frames
rdi_queue = deque(maxlen=12)  # range-doppler frames
rai_queue = deque(maxlen=12)  # range-azimuth frames
rei_queue = deque(maxlen=12)  # range-elevation frames

gesturetimecnt = 0
# Counts consecutive "hand hovering" detections (updated in RDA_Time).

gesturetimecnt2 = 0

NUM_TX = 3
NUM_RX = 4
VIRT_ANT = 4
VIRT_ANT1 = 1
# Data specific parameters
NUM_CHIRPS = 64
NUM_ADC_SAMPLES = 64
RANGE_RESOLUTION = .0488
DOPPLER_RESOLUTION = 0.0806
NUM_FRAMES = 300

# DSP processing parameters
SKIP_SIZE = 4 # ignore targets at the edge angles
ANGLE_RES = 2 # angular resolution (degrees)
ANGLE_RANGE = 90 # monitored angular span (+/- degrees)
ANGLE_FFT_BINS= 64
ANGLE_BINS = (ANGLE_RANGE * 2) // ANGLE_RES + 1
BINS_PROCESSED = 64

numRangeBins = NUM_ADC_SAMPLES
numDopplerBins = NUM_CHIRPS

# Derive range/doppler resolution from the ADC sample count.
range_resolution, bandwidth = dsp.range_resolution(NUM_ADC_SAMPLES)
doppler_resolution = dsp.doppler_resolution(bandwidth)

# Start DSP processing: pre-allocated output maps, reused on every frame.
range_azimuth = np.zeros((int(ANGLE_BINS), BINS_PROCESSED))
range_elevation = np.zeros((int(ANGLE_BINS), BINS_PROCESSED))
azimuth_elevation = np.zeros((ANGLE_FFT_BINS, ANGLE_FFT_BINS, NUM_ADC_SAMPLES))

num_vec, steering_vec = Angle_dsp.gen_steering_vec(ANGLE_RANGE, ANGLE_RES, VIRT_ANT) # theta span, theta resolution, number of Vrx antenna signals
57 |
def doppler_fft(x, window_type_2d=None):
    """Doppler FFT over the chirp dimension of one frame.

    x is laid out (chirps, range bins); the result is the fftshifted
    log2-magnitude Doppler spectrum, laid out (range bins, doppler bins).
    An optional window (via utils.windowing) is applied along chirps first.
    """
    # Reorder to (range bins, chirps) so the FFT runs along the chirp axis.
    spectrum_in = np.transpose(x, axes=(1, 0))
    if window_type_2d:
        spectrum_in = utils.windowing(spectrum_in, window_type_2d, axis=1)

    # NOTE(review): a zoom-FFT here might extract the useful frequency band
    # more effectively (idea carried over from the original comment).
    spectrum = np.fft.fft(spectrum_in, axis=1)
    log_mag = np.log2(np.abs(spectrum))
    # Centre zero Doppler in the middle of the axis for display.
    return np.fft.fftshift(log_mag, axes=1)
70 |
# This function produces the three time-history views drawn by the UI:
# RTI (range-time), DTI (doppler-time / micro-doppler) and the stacked RDI.

framecnt = 0  # frames waited after a gesture trigger before latching the event
def RDA_Time(adc_data, window_type_1d=None, clutter_removal_enabled=True, CFAR_enable=False, axis=-1):
    """Process one raw ADC frame into range-time, range-doppler and
    micro-doppler views, and gate the gesture-capture flags in globalvar.

    Parameters:
        adc_data: raw ADC cube; transposed below to
            (num_chirps_per_frame, num_rx_antennas, num_adc_samples)
            -- assumes input is (chirps, samples, rx); TODO confirm caller.
        window_type_1d: optional window forwarded to range_processing.
        clutter_removal_enabled: if True, remove static clutter along chirps.
        CFAR_enable, axis: accepted but unused in this implementation.

    Returns:
        rti_array_out: thresholded range-time magnitude array.
        rdi_framearray: stacked range-doppler frames from rdi_queue.
        micro_doppler_data_out: thresholded micro-doppler (doppler-time) map.

    Side effects: appends to rti_queue/rdi_queue and may toggle the
    'usr_gesture' / 'timer_2s' flags via globalvar.
    """
    global gesturetimecnt, framecnt
    # Rearrange to (num_chirps_per_frame, num_rx_antennas, num_adc_samples).
    adc_data = np.transpose(adc_data, [0, 2, 1])
    # radar_cube = range_processing(adc_data, window_type_1d, 2)
    # Decimate samples by 2 (lowers the maximum unambiguous range, dmax).
    # radar_cube = range_processing(np.concatenate([adc_data[:,:,0:64:2],adc_data[:,:,0:64:2]], axis=2), window_type_1d, 2)
    radar_cube = range_processing(2*adc_data[:,:,0:64:2], window_type_1d, 2)

    if clutter_removal_enabled:
        radar_cube = Compensation.clutter_removal(radar_cube,axis=0)


    # Range-doppler map.
    range_doppler_fft, aoa_input = doppler_processing(radar_cube,
                                                      num_tx_antennas=3,
                                                      interleaved=False,
                                                      clutter_removal_enabled=False, # already removed above
                                                      window_type_2d=Window.HANNING,
                                                      accumulate = False)
    # (numRangeBins, numVirtualAntennas, num_doppler_bins)

    rdi_abs = np.transpose(np.fft.fftshift(np.abs(range_doppler_fft), axes=2), [0, 2, 1])
    rdi_abs = np.flip(rdi_abs, axis=0)
    rdi_queue.append(rdi_abs)
    # Stack the queued frames and return them together (deque maxlen is 12,
    # although the original comment said 16).
    rdi_framearray = np.array(rdi_queue) # frame, chirps, adcnum, numVirtualAntennas

    # Angle map (kept for reference, currently disabled):
    # azimuth_elevation[0,0:4,:] = range_doppler_fft[:,[10,8,6,4],0].T
    # azimuth_elevation[1,0:4,:] = range_doppler_fft[:,[11,9,7,5],0].T
    # azimuth_elevation[2,0:2,:] = range_doppler_fft[:,[2,0],0].T
    # azimuth_elevation[3,0:2,:] = range_doppler_fft[:,[3,0],0].T
    # aei_raw = np.fft.fft2(azimuth_elevation, axes=[0, 1])
    # aei_raw = np.log2(np.abs(aei_raw))
    # range_doppler_fft
    # Range-time view: first RX channel of the range FFT.
    det_matrix = radar_cube[:, 0, :]

    # Use the range profile as the "hand present" detector.
    # NOTE(review): the original comment described range bins 4..36 with a
    # >26-cell threshold; the active code checks bins 36:62 with >14 cells
    # above 3e3 -- thresholds are empirical.
    Iscapture = gl.get_value('IsRecognizeorCapture')
    # if(np.sum(det_matrix[2:36,:]>3e3)>26) and Iscapture:16
    if(np.sum(det_matrix[:,36:62]>3e3)>14) :
        if Iscapture:
            gesturetimecnt = gesturetimecnt + 1
            # print("gesture present %d" % gesturetimecnt)

    if(gesturetimecnt>=2) and Iscapture:
        framecnt = framecnt + 1
        # framecnt delays a few frames before the snapshot is latched into
        # the judgegesture folder.
        if framecnt>=8:
            # print("entered")
            # datetime.now()
            # a = datetime.now()  # get current time
            if gl.get_value('timer_2s'):
                gl.set_value('usr_gesture',True)
                gl.set_value('timer_2s',False)
                # print("gesture present")
            framecnt = 0
            gesturetimecnt=0


    rti_queue.append(det_matrix)
    rti_framearray = np.array(rti_queue) # frame, chirps, adcnum
    rti_array = np.reshape(rti_framearray, (1, -1, 64)) # chirps, adcnum
    # (num_chirps_per_frame, num_range_bins, num_rx_antennas)
    rti_array_out = np.transpose(rti_array, [1, 2, 0])

    # Micro-doppler (doppler-time) map: doppler FFT of each queued frame.
    micro_doppler_data = np.zeros((rti_framearray.shape[0], rti_framearray.shape[1], rti_framearray.shape[2]), dtype=np.float64)
    micro_doppler_data_out = np.zeros((16,64), dtype=np.float64)
    for i, frame in enumerate(rti_framearray):
        # --- Show output
        det_matrix_vis = doppler_fft(frame,window_type_2d=Window.HANNING)
        micro_doppler_data[i,:,:] = det_matrix_vis


    rti_array_out = np.flip(np.abs(rti_array_out), axis=1)
    # Suppress everything below the empirical noise floor.
    rti_array_out[rti_array_out<3e3]=0
    # # Judge SNR strength from the 64x64 RDI map (disabled):
    # if(np.sum(rti_array_out[0:1024:16,:,:]<100)>4090):
    #     SNR = False
    # else:
    #     SNR = True

    micro_doppler_data_out = micro_doppler_data.sum(axis=1)

    micro_doppler_data_out[micro_doppler_data_out<20]=0

    return rti_array_out, rdi_framearray, micro_doppler_data_out
170 |
171 |
172 |
def Range_Angle(data, padding_size=None, clutter_removal_enabled=True, window_type_1d = Window.HANNING,Music_enable = False):
    """Compute range-azimuth and range-elevation maps for one frame via
    Capon beamforming (or 1-D MUSIC when Music_enable is True).

    Parameters:
        data: raw ADC cube; transposed below to
            (num_chirps_per_frame, num_rx_antennas, num_adc_samples).
        padding_size: unused here (kept for interface compatibility).
        clutter_removal_enabled: if True, remove static clutter along chirps.
        window_type_1d: window forwarded to range_processing.
        Music_enable: use MUSIC instead of Capon for angle estimation.

    Returns:
        (rai_framearray, rei_framearray): stacked range-azimuth and
        range-elevation histories from rai_queue/rei_queue.
    """
    # Virtual antenna layout:
    # (0:TX1-RX1,1:TX1-RX2,2:TX1-RX3,3:TX1-RX4,| 4:TX2-RX1,5:TX2-RX2,6:TX2-RX3,7:TX2-RX4,| 8:TX3-RX1,9:TX3-RX2,10:TX3-RX3,11:TX3-RX4)
    # data = np.fft.fft2(data[:, :, [1,0,9,8]], s=[padding_size[0], padding_size[1]], axes=[0, 1])
    # global SNR

    # Rearrange to (num_chirps_per_frame, num_rx_antennas, num_adc_samples).
    adc_data = np.transpose(data, [0, 2, 1])
    # radar_cube = dsp.zoom_range_processing(adc_data, 0.1, 0.5, 1, 0, adc_data.shape[2])
    radar_cube = range_processing(2*adc_data[:,:,0:64:3], window_type_1d, 2)

    if clutter_removal_enabled:
        radar_cube = Compensation.clutter_removal(radar_cube,axis=0)

    # np.save('data.npy',radar_cube)
    # Rough frame SNR estimate; 14.7 and 1.8 appear to be empirical offsets.
    frame_SNR = np.log(np.sum(np.abs(radar_cube[:,:])))-14.7
    if(np.abs(frame_SNR)<1.8):
        frame_SNR = 0
    # print(frame_SNR)
    # --- capon beamforming
    beamWeights = np.zeros((VIRT_ANT, BINS_PROCESSED), dtype=np.complex_)

    # Note that when replacing with generic doppler estimation functions, radarCube is interleaved and
    # has doppler at the last dimension.
    # Azimuth angle, one processed range bin at a time.
    for i in range(BINS_PROCESSED):
        if Music_enable:
            range_azimuth[:,i] = dsp.aoa_music_1D(steering_vec, radar_cube[:, [10,8,6,4], i].T, num_sources=1)
        else: #4,6,8,10#
            range_azimuth[:,i], beamWeights[:,i] = dsp.aoa_capon(radar_cube[:, [7,4,3,0], i].T, steering_vec, magnitude=True)
            # range_azimuth[:,i], beamWeights[:,i] = dsp.aoa_capon_new(radar_cube[:, [10,8,6,4], i].T,radar_cube[:, [10,8,6,4], i+1].T, steering_vec, magnitude=True)
    # Elevation angle.
    for i in range(BINS_PROCESSED):
        if Music_enable:
            range_elevation[:,i] = dsp.aoa_music_1D(steering_vec, radar_cube[:, [1,0,9,8], i].T, num_sources=1)
        else:
            # radar_cube[:, [1,0,9,8], i].T*[[1],[-1],[1],[-1]] not used here;
            # the sign pattern presumably relates to the steering-vector
            # convention -- TODO confirm.
            range_elevation[:,i], beamWeights[:,i] = dsp.aoa_capon(radar_cube[:, [7,6,11,10], i].T*[[1],[-1],[1],[-1]], steering_vec, magnitude=True)

    rdi_ab1 = np.flip(np.abs(range_azimuth), axis=1)
    # rdi_ab2 = np.fft.fftshift(range_elevation, axes=0)
    rdi_ab2 = np.flip(np.abs(range_elevation), axis=1)
    # Clip peaks to half the maximum to tame the display dynamic range.
    rdi_ab1 = np.minimum(rdi_ab1,rdi_ab1.max()/2)
    rdi_ab2 = np.minimum(rdi_ab2,rdi_ab2.max()/2)
    # Zero out targets whose range falls outside the gesture region.
    rdi_ab1[:,40:90] = 0
    rdi_ab2[:,40:90] = 0
    # rdi_ab1[:,13:19] = 0.1*rdi_ab1[:,13:19]
    # rdi_ab2[:,13:19] = 0.1*rdi_ab2[:,13:19]
    # rdi_ab1[:5,:] = 0
    # rdi_ab2[-5:,:] = 0
    # Normalise and weight by the frame SNR.
    rdi_ab1 = rdi_ab1 / rdi_ab1.max() * frame_SNR
    rdi_ab2 = rdi_ab2 / rdi_ab2.max() * frame_SNR

    rai_queue.append(rdi_ab1)
    rei_queue.append(rdi_ab2)
    # Stack the queued frames and return them together (deque maxlen is 12).
    rai_framearray = np.array(rai_queue) # frame, chirps, adcnum
    rei_framearray = np.array(rei_queue) # frame, chirps, adcnum

    return rai_framearray, rei_framearray
234 |
235 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-NonCommercial 4.0 International Public License
2 |
3 | By using this software, you agree to the following terms:
4 |
5 | You are free to:
6 | - Share — copy and redistribute the material in any medium or format
7 | - Adapt — remix, transform, and build upon the material
8 |
9 | Under the following terms:
10 | - Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made.
11 | - NonCommercial — You may not use the material for commercial purposes.
12 |
13 | No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.
14 |
15 | Full license text: https://creativecommons.org/licenses/by-nc/4.0/legalcode
16 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RadarStream
2 |
3 | RadarStream is a real-time RAWDATA acquisition, processing, and visualization system for TI MIMO mmWave radar series.
4 |
5 |
6 |
7 | https://github.com/user-attachments/assets/7ce99b51-a1af-4025-8a84-ee580eb92d04
8 |
9 | Demo1: Real-time Motion Detection and Radar Feature Visualization
10 |
11 |
12 | Demo2: Real-time Gesture Recognition System
13 |
14 |
15 | ## Project Overview
16 |
17 | This system supports Texas Instruments' MIMO mmWave radar series for real-time raw data acquisition, processing, and visualization. In addition to the RF evaluation board, the DCA1000EVM is required for data capture. Currently, the system has been tested with:
18 | - IWR6843ISK
19 | - IWR6843ISK-OBS
20 | - IWR1843ISK
21 |
22 | If you encounter any issues while using this project, please feel free to open an issue or submit a pull request.
23 |
24 | ## Features ✨
25 |
26 | * **Real-time, Multi-threaded Radar Data Acquisition from TI MIMO mmWave Radar Sensors:**
27 | * Leveraging a **multi-threaded architecture 🧵** for data acquisition and processing.
28 | * To overcome Python's Global Interpreter Lock (GIL) and enable true multi-core processing, the data acquisition module is **wrapped in C 🚀**, ensuring near real-time, frame-loss-free data capture and handling.
29 | * **Multi-dimensional Feature Extraction:**
30 | * Range-Time Information (RTI)
31 | * Doppler-Time Information (DTI)
32 | * Range-Doppler Information (RDI)
33 | * Range-Azimuth Information (RAI)
34 | * Range-Elevation Information (REI)
35 | * **Interactive Visualization Interface**
36 |
37 | ## Requirements
38 |
39 | - Python 3.6+
40 | - PyQt5
41 | - PyQtGraph
42 | - NumPy
43 | - PyTorch
44 | - Matplotlib
45 | - Serial
46 |
47 | ## Hardware Requirements
48 |
49 | - TI MIMO mmWave Radar Sensor (tested with IWR6843ISK and IWR6843ISK-OBS)
50 | - DCA1000 EVM (essential for raw data capture)
51 | - PC with Windows OS
52 |
53 | ## Firmware Requirements
54 | The firmware must be selected from the `mmwave_industrial_toolbox_4_10_1\labs\Out_Of_Box_Demo\prebuilt_binaries\` directory inside any version of the mmWave Industrial Toolbox.
55 | There is no strict requirement to use version 4.10.1.
56 |
57 | ## Setup and Installation
58 |
59 | 1. Clone this repository
60 | 2. Install the required dependencies:
61 | ```
62 | pip install pyqt5 pyqtgraph numpy torch matplotlib pyserial
63 | ```
64 | 3. Connect the mmWave radar sensor and DCA1000 EVM to your computer (you only need a 5 V 3 A DC power supply, an Ethernet cable, and a micro-USB cable)
65 | 4. Configure the network IPv4 settings (referencing the IPv4 configuration process from using mmWaveStudio for the DCA1000 EVM)
66 |
67 | Two different acquisition methods are shown here: one figure displays Raspberry Pi 4B acquisition, while the other demonstrates Windows-based acquisition. However, the Raspberry Pi acquisition has very few frames during real-time processing and display, making it prone to data loss. (not recommended to use Raspberry Pi for acquisition)
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 | ## 3D Printed Mount
77 |
78 | The repository includes STL files for a 3D printed structure designed to mount and secure the DCA1000EVM board.
79 |
80 | **Note:** You will need some M3 size nylon standoffs and screws for assembly.
81 |
82 |
83 |
84 |
85 |
86 | ## Usage
87 |
88 | 1. Run the main application:
89 | ```
90 | python main.py
91 | ```
92 | 2. Select the appropriate COM port for the radar CLI interface
93 | 3. Choose a radar configuration file
94 | 4. Click "Send Config" to initialize the radar
95 | 5. Use the interface to:
96 | - Visualize radar data in real-time
97 | - Capture training data for machine learning models
98 |
99 |
100 | ## Project Structure
101 | - `config/`: Configuration files for different radar settings
102 | - `gesture_icons/`: Gesture icons for visualization
103 | - `libs/`: Library files for radar communication
104 | - `STL3D`: 3D printed mount STL files
105 | - `main.py`: Main application entry point
106 | - `real_time_process.py`: Real-time data processing
107 | - `radar_config.py`: Radar configuration utilities
108 | - `iwr6843_tlv/`: TLV protocol implementation for IWR6843
109 | - `dsp/`: Digital signal processing modules
110 | - `UI_interface.py`: PyQt5 user interface
111 |
112 |
113 | ## Citation
114 |
115 | If this project helps your research, please consider citing our papers that are closely related to this tool:
116 |
117 | ```
118 |
119 | ```
120 |
121 |
122 | ## Acknowledgements
123 |
124 | This project references and builds upon:
125 | - [real-time-radar](https://github.com/AndyYu0010/real-time-radar) by AndyYu0010
126 | - [OpenRadar](https://github.com/PreSenseRadar/OpenRadar) - specifically the DSP module
127 |
128 | ## TODO
129 |
130 | Future improvements planned for this project:
131 | - [ ] Validate compatibility with more RF evaluation boards
132 | - [ ] Migrate from PyQt5 to PySide6
133 | - [ ] Make the API in the libs folder more flexible
134 |
--------------------------------------------------------------------------------
/STL3D/GestureradarV1.2.SLDPRT:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/STL3D/GestureradarV1.2.SLDPRT
--------------------------------------------------------------------------------
/STL3D/GestureradarV1.2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/STL3D/GestureradarV1.2.STL
--------------------------------------------------------------------------------
/UI_interface.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'GestureRecognize.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.12.3
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 | from PyQt5.QtCore import pyqtSignal, Qt
11 | from PyQt5.QtWidgets import QComboBox
12 | import sys
13 | from pyqtgraph import GraphicsLayoutWidget
14 | import globalvar as gl
15 | import pyqtgraph as pg
16 | import os
17 | import matplotlib.cm
18 | import time
19 | import sys
20 | import os
21 | from PyQt5.QtGui import *
22 | from PyQt5.QtWidgets import *
# Initialise the cross-module global store and the default gesture flag.
gl._init()
gl.set_value('usr_gesture', False)  # set True by DSP when a gesture fires


radarRFconfigpathfile = 'config'  # directory holding radar RF .cfg files
modelpathfile = 'save_model'  # directory holding trained model files
29 |
30 |
# Combo box variant: clicking anywhere on the widget opens the popup and
# emits a signal, so listeners can refresh the item list on demand.
class ClickedComboBox(QComboBox):
    arrowClicked = pyqtSignal()

    def showPopup(self):
        """Open the drop-down list, then notify listeners it was opened."""
        super().showPopup()
        self.arrowClicked.emit()
38 |
# Minimised ("pet") window: a frameless, always-on-top floating icon shown
# while the main window is hidden; draggable with the left mouse button,
# right-click restores the main window.
class Qt_pet(QWidget):

    def __init__(self, MainWindow):
        """Create the floating icon window bound to *MainWindow*."""
        super(Qt_pet, self).__init__()
        self.MainWindow = MainWindow
        self.dis_file = "gesture_icons/"
        self.windowinit()

        self.pos_first = self.pos()  # drag anchor: cursor offset inside the window
        # self.timer.timeout.connect(self.img_update)

    def img_update(self, img_path):
        """Swap the displayed gesture icon for the image at *img_path*."""
        self.img_path = img_path
        self.qpixmap = QPixmap(self.img_path).scaled(256, 256)
        self.lab.setPixmap(self.qpixmap)

    def windowinit(self):
        """Build the 256x256 frameless, translucent, always-on-top window."""
        self.x = 800
        self.y = 600
        self.setGeometry(self.x, self.y, 256, 256)
        self.img_path = 'gesture_icons/7.jpg'
        self.lab = QLabel(self)
        self.qpixmap = QPixmap(self.img_path).scaled(256, 256)
        self.lab.setPixmap(self.qpixmap)
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.SubWindow)
        self.setAutoFillBackground(False)
        self.setAttribute(Qt.WA_TranslucentBackground, True)
        # self.show()

    def mousePressEvent(self, QMouseEvent):
        # Left button: remember the grab offset so the window can be dragged.
        if QMouseEvent.button() == Qt.LeftButton:
            self.pos_first = QMouseEvent.globalPos() - self.pos()
            QMouseEvent.accept()
            self.setCursor(QCursor(Qt.OpenHandCursor))
        # Right button: restore the main window and hide this pet window.
        if QMouseEvent.button() == Qt.RightButton:
            self.MainWindow.show()
            self.hide()

    def mouseMoveEvent(self, QMouseEvent):
        # BUGFIX: the original tested `if Qt.LeftButton:`, a non-zero constant
        # that is always truthy; test the event's actual button state instead.
        if QMouseEvent.buttons() & Qt.LeftButton:
            self.move(QMouseEvent.globalPos() - self.pos_first)
            # print(self.pos())
            # BUGFIX: pos().x / pos().y are bound methods and must be called;
            # the original stored the method objects, not the coordinates.
            self.x, self.y = self.pos().x(), self.pos().y()
            QMouseEvent.accept()
85 |
86 |
87 | class Ui_MainWindow(object):
88 | def setupUi(self, MainWindow):
89 | MainWindow.setObjectName("MainWindow")
90 | MainWindow.resize(1280, 800)
91 | pg.setConfigOption('background', '#f0f0f0')
92 | pg.setConfigOption('foreground', 'd')
93 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
94 | sizePolicy.setHorizontalStretch(5)
95 | sizePolicy.setVerticalStretch(0)
96 | sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
97 | MainWindow.setSizePolicy(sizePolicy)
98 | MainWindow.setMinimumSize(QtCore.QSize(0, 0))
99 | self.centralwidget = QtWidgets.QWidget(MainWindow)
100 | self.centralwidget.setObjectName("centralwidget")
101 | self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.centralwidget)
102 | self.horizontalLayout_4.setObjectName("horizontalLayout_4")
103 | self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
104 | self.tabWidget.setObjectName("tabWidget")
105 | self.tab = QtWidgets.QWidget()
106 | self.tab.setObjectName("tab")
107 | self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab)
108 | self.verticalLayout_3.setObjectName("verticalLayout_3")
109 | self.splitter = QtWidgets.QSplitter(self.tab)
110 | self.splitter.setEnabled(True)
111 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
112 | sizePolicy.setHorizontalStretch(0)
113 | sizePolicy.setVerticalStretch(0)
114 | sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
115 | self.splitter.setSizePolicy(sizePolicy)
116 | self.splitter.setAutoFillBackground(False)
117 | self.splitter.setOrientation(QtCore.Qt.Horizontal)
118 | self.splitter.setObjectName("splitter")
119 | self.groupBox_11 = QtWidgets.QGroupBox(self.splitter)
120 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
121 | sizePolicy.setHorizontalStretch(5)
122 | sizePolicy.setVerticalStretch(15)
123 | sizePolicy.setHeightForWidth(self.groupBox_11.sizePolicy().hasHeightForWidth())
124 | self.groupBox_11.setSizePolicy(sizePolicy)
125 | self.groupBox_11.setMinimumSize(QtCore.QSize(0, 0))
126 | self.groupBox_11.setMaximumSize(QtCore.QSize(2400, 8000))
127 | self.groupBox_11.setObjectName("groupBox_11")
128 | self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_11)
129 | self.verticalLayout_4.setContentsMargins(6, 6, 6, 6)
130 | self.verticalLayout_4.setObjectName("verticalLayout_4")
131 | self.groupBox_2 = QtWidgets.QGroupBox(self.groupBox_11)
132 | self.groupBox_2.setObjectName("groupBox_2")
133 | self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_2)
134 | self.verticalLayout.setContentsMargins(6, 6, 6, 6)
135 | self.verticalLayout.setObjectName("verticalLayout")
136 | self.gridLayout_3 = QtWidgets.QGridLayout()
137 | self.gridLayout_3.setObjectName("gridLayout_3")
138 | self.label_15 = QtWidgets.QLabel(self.groupBox_2)
139 | self.label_15.setObjectName("label_15")
140 | self.gridLayout_3.addWidget(self.label_15, 3, 0, 1, 1)
141 | self.label_16 = QtWidgets.QLabel(self.groupBox_2)
142 | self.label_16.setObjectName("label_16")
143 | self.gridLayout_3.addWidget(self.label_16, 3, 1, 1, 1)
144 | self.label_36 = QtWidgets.QLabel(self.groupBox_2)
145 | self.label_36.setObjectName("label_36")
146 | self.gridLayout_3.addWidget(self.label_36, 4, 0, 1, 1)
147 | self.label_14 = QtWidgets.QLabel(self.groupBox_2)
148 | self.label_14.setObjectName("label_14")
149 | self.gridLayout_3.addWidget(self.label_14, 1, 1, 1, 1)
150 | self.label_43 = QtWidgets.QLabel(self.groupBox_2)
151 | self.label_43.setObjectName("label_43")
152 | self.gridLayout_3.addWidget(self.label_43, 6, 0, 1, 1)
153 | self.label_13 = QtWidgets.QLabel(self.groupBox_2)
154 | self.label_13.setObjectName("label_13")
155 | self.gridLayout_3.addWidget(self.label_13, 1, 0, 1, 1)
156 | self.label_35 = QtWidgets.QLabel(self.groupBox_2)
157 | self.label_35.setObjectName("label_35")
158 | self.gridLayout_3.addWidget(self.label_35, 2, 1, 1, 1)
159 | self.comboBox_7 = ClickedComboBox(self.groupBox_2)
160 | self.comboBox_7.setObjectName("comboBox_7")
161 | self.gridLayout_3.addWidget(self.comboBox_7, 0, 1, 1, 1)
162 | self.label_45 = QtWidgets.QLabel(self.groupBox_2)
163 | self.label_45.setAlignment(QtCore.Qt.AlignCenter)
164 | self.label_45.setObjectName("label_45")
165 | self.gridLayout_3.addWidget(self.label_45, 5, 0, 1, 2)
166 | self.comboBox_9 = QtWidgets.QComboBox(self.groupBox_2)
167 | self.comboBox_9.setEnabled(False)
168 | self.comboBox_9.setEditable(False)
169 | self.comboBox_9.setObjectName("comboBox_9")
170 | self.gridLayout_3.addWidget(self.comboBox_9, 7, 1, 1, 1)
171 | self.label_12 = QtWidgets.QLabel(self.groupBox_2)
172 | self.label_12.setObjectName("label_12")
173 | self.gridLayout_3.addWidget(self.label_12, 0, 0, 1, 1)
174 | self.label_44 = QtWidgets.QLabel(self.groupBox_2)
175 | self.label_44.setEnabled(False)
176 | self.label_44.setObjectName("label_44")
177 | self.gridLayout_3.addWidget(self.label_44, 7, 0, 1, 1)
178 | self.label_17 = QtWidgets.QLabel(self.groupBox_2)
179 | self.label_17.setObjectName("label_17")
180 | self.gridLayout_3.addWidget(self.label_17, 2, 0, 1, 1)
181 | self.label_37 = QtWidgets.QLabel(self.groupBox_2)
182 | self.label_37.setObjectName("label_37")
183 | self.gridLayout_3.addWidget(self.label_37, 4, 1, 1, 1)
184 | self.comboBox_8 = ClickedComboBox(self.groupBox_2)
185 | self.comboBox_8.setObjectName("comboBox_8")
186 | self.gridLayout_3.addWidget(self.comboBox_8, 6, 1, 1, 1)
187 | self.pushButton_11 = QtWidgets.QPushButton(self.groupBox_2)
188 | self.pushButton_11.setObjectName("pushButton_11")
189 | self.pushButton_12 = QtWidgets.QPushButton(self.groupBox_2)
190 | self.pushButton_12.setObjectName("pushButton_12")
191 | self.gridLayout_3.addWidget(self.pushButton_11, 8, 1, 1, 1)
192 | self.gridLayout_3.addWidget(self.pushButton_12, 8, 0, 1, 1)
193 | self.verticalLayout.addLayout(self.gridLayout_3)
194 | self.verticalLayout_4.addWidget(self.groupBox_2)
195 | self.groupBox_7 = QtWidgets.QGroupBox(self.groupBox_11)
196 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
197 | sizePolicy.setHorizontalStretch(0)
198 | sizePolicy.setVerticalStretch(2)
199 | sizePolicy.setHeightForWidth(self.groupBox_7.sizePolicy().hasHeightForWidth())
200 | self.groupBox_7.setSizePolicy(sizePolicy)
201 | self.groupBox_7.setObjectName("groupBox_7")
202 | self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.groupBox_7)
203 | self.verticalLayout_7.setContentsMargins(6, 6, 6, 6)
204 | self.verticalLayout_7.setObjectName("verticalLayout_7")
205 | self.horizontalLayout = QtWidgets.QHBoxLayout()
206 | self.horizontalLayout.setObjectName("horizontalLayout")
207 | self.label_5 = QtWidgets.QLabel(self.groupBox_7)
208 | self.label_5.setObjectName("label_5")
209 | self.horizontalLayout.addWidget(self.label_5)
210 | self.comboBox = ClickedComboBox(self.groupBox_7)
211 | self.comboBox.setObjectName("comboBox")
212 | self.horizontalLayout.addWidget(self.comboBox)
213 | self.verticalLayout_7.addLayout(self.horizontalLayout)
214 | self.groupBox_3 = QtWidgets.QGroupBox(self.groupBox_7)
215 | self.groupBox_3.setFlat(True)
216 | self.groupBox_3.setCheckable(True)
217 | self.groupBox_3.setChecked(False)
218 | self.groupBox_3.setObjectName("groupBox_3")
219 | self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_3)
220 | self.verticalLayout_5.setContentsMargins(2, 6, 2, 6)
221 | self.verticalLayout_5.setObjectName("verticalLayout_5")
222 | self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
223 | self.horizontalLayout_3.setObjectName("horizontalLayout_3")
224 | self.label_2 = QtWidgets.QLabel(self.groupBox_3)
225 | self.label_2.setObjectName("label_2")
226 | self.horizontalLayout_3.addWidget(self.label_2)
227 | self.comboBox_2 = ClickedComboBox(self.groupBox_3)
228 | self.comboBox_2.setObjectName("comboBox_2")
229 | self.horizontalLayout_3.addWidget(self.comboBox_2)
230 | self.verticalLayout_5.addLayout(self.horizontalLayout_3)
231 | self.pushButton_15 = QtWidgets.QPushButton(self.groupBox_3)
232 | self.pushButton_15.setObjectName("pushButton_15")
233 | self.pushButton_15.setCheckable(True)
234 | self.verticalLayout_5.addWidget(self.pushButton_15)
235 | self.verticalLayout_7.addWidget(self.groupBox_3)
236 | self.groupBox_4 = QtWidgets.QGroupBox(self.groupBox_7)
237 | self.groupBox_4.setFlat(True)
238 | self.groupBox_4.setCheckable(True)
239 | self.groupBox_4.setChecked(False)
240 | self.groupBox_4.setObjectName("groupBox_4")
241 | self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.groupBox_4)
242 | self.verticalLayout_6.setContentsMargins(2, 6, 2, 6)
243 | self.verticalLayout_6.setObjectName("verticalLayout_6")
244 | self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
245 | self.horizontalLayout_5.setObjectName("horizontalLayout_5")
246 | self.label_3 = QtWidgets.QLabel(self.groupBox_4)
247 | self.label_3.setObjectName("label_3")
248 | self.horizontalLayout_5.addWidget(self.label_3)
249 | self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_4)
250 | self.lineEdit_6.setObjectName("lineEdit_6")
251 | self.horizontalLayout_5.addWidget(self.lineEdit_6)
252 | self.comboBox_3 = QtWidgets.QComboBox(self.groupBox_4)
253 | self.comboBox_3.setObjectName("comboBox_3")
254 | self.comboBox_3.addItem("")
255 | self.comboBox_3.addItem("")
256 | self.comboBox_3.addItem("")
257 | self.comboBox_3.addItem("")
258 | self.comboBox_3.addItem("")
259 | self.comboBox_3.addItem("")
260 | self.comboBox_3.addItem("")
261 | self.horizontalLayout_5.addWidget(self.comboBox_3)
262 | self.verticalLayout_6.addLayout(self.horizontalLayout_5)
263 | self.pushButton = QtWidgets.QPushButton(self.groupBox_4)
264 | self.pushButton.setCheckable(True)
265 | self.pushButton.setObjectName("pushButton")
266 | self.verticalLayout_6.addWidget(self.pushButton)
267 | self.verticalLayout_7.addWidget(self.groupBox_4)
268 | self.verticalLayout_4.addWidget(self.groupBox_7)
269 | self.groupBox = QtWidgets.QGroupBox(self.groupBox_11)
270 | self.groupBox.setObjectName("groupBox")
271 | self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
272 | self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
273 | self.verticalLayout_2.setContentsMargins(5, 5, 5, 5)
274 | self.verticalLayout_2.setSpacing(0)
275 | self.verticalLayout_2.setObjectName("verticalLayout_2")
276 | self.textEdit = QtWidgets.QTextEdit(self.groupBox)
277 | self.textEdit.setFrameShape(QtWidgets.QFrame.StyledPanel)
278 | self.textEdit.setFrameShadow(QtWidgets.QFrame.Sunken)
279 | self.textEdit.setTabStopWidth(80)
280 | self.textEdit.setObjectName("textEdit")
281 | self.verticalLayout_2.addWidget(self.textEdit)
282 | self.verticalLayout_4.addWidget(self.groupBox)
283 | self.groupBox_9 = QtWidgets.QGroupBox(self.splitter)
284 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
285 | sizePolicy.setHorizontalStretch(10)
286 | sizePolicy.setVerticalStretch(15)
287 | sizePolicy.setHeightForWidth(self.groupBox_9.sizePolicy().hasHeightForWidth())
288 | self.groupBox_9.setSizePolicy(sizePolicy)
289 | self.groupBox_9.setObjectName("groupBox_9")
290 | self.gridLayout = QtWidgets.QGridLayout(self.groupBox_9)
291 | self.gridLayout.setContentsMargins(11, 11, 11, 11)
292 | self.gridLayout.setHorizontalSpacing(7)
293 | self.gridLayout.setObjectName("gridLayout")
294 | self.label = QtWidgets.QLabel(self.groupBox_9)
295 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
296 | sizePolicy.setHorizontalStretch(0)
297 | sizePolicy.setVerticalStretch(0)
298 | sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
299 | self.label.setSizePolicy(sizePolicy)
300 | self.label.setAlignment(QtCore.Qt.AlignCenter)
301 | self.label.setObjectName("label")
302 | self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
303 | self.label_7 = QtWidgets.QLabel(self.groupBox_9)
304 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
305 | sizePolicy.setHorizontalStretch(0)
306 | sizePolicy.setVerticalStretch(0)
307 | sizePolicy.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
308 | self.label_7.setSizePolicy(sizePolicy)
309 | self.label_7.setAlignment(QtCore.Qt.AlignCenter)
310 | self.label_7.setObjectName("label_7")
311 | self.gridLayout.addWidget(self.label_7, 0, 1, 1, 1)
312 | self.label_8 = QtWidgets.QLabel(self.groupBox_9)
313 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
314 | sizePolicy.setHorizontalStretch(0)
315 | sizePolicy.setVerticalStretch(0)
316 | sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
317 | self.label_8.setSizePolicy(sizePolicy)
318 | self.label_8.setAlignment(QtCore.Qt.AlignCenter)
319 | self.label_8.setObjectName("label_8")
320 | self.gridLayout.addWidget(self.label_8, 0, 2, 1, 1)
321 | self.graphicsView_4 = GraphicsLayoutWidget(self.groupBox_9)
322 | self.graphicsView_4.setEnabled(True)
323 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
324 | sizePolicy.setHorizontalStretch(0)
325 | sizePolicy.setVerticalStretch(1)
326 | sizePolicy.setHeightForWidth(self.graphicsView_4.sizePolicy().hasHeightForWidth())
327 | self.graphicsView_4.setSizePolicy(sizePolicy)
328 | self.graphicsView_4.setMinimumSize(QtCore.QSize(255, 255))
329 | self.graphicsView_4.setMaximumSize(QtCore.QSize(255, 255))
330 | self.graphicsView_4.setObjectName("graphicsView_4")
331 | self.gridLayout.addWidget(self.graphicsView_4, 3, 1, 1, 1)
332 | self.graphicsView_2 = GraphicsLayoutWidget(self.groupBox_9)
333 | self.graphicsView_2.setEnabled(True)
334 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
335 | sizePolicy.setHorizontalStretch(0)
336 | sizePolicy.setVerticalStretch(1)
337 | sizePolicy.setHeightForWidth(self.graphicsView_2.sizePolicy().hasHeightForWidth())
338 | self.graphicsView_2.setSizePolicy(sizePolicy)
339 | self.graphicsView_2.setMinimumSize(QtCore.QSize(255, 255))
340 | self.graphicsView_2.setMaximumSize(QtCore.QSize(255, 255))
341 | self.graphicsView_2.setObjectName("graphicsView_2")
342 | self.gridLayout.addWidget(self.graphicsView_2, 1, 1, 1, 1)
343 | self.label_9 = QtWidgets.QLabel(self.groupBox_9)
344 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
345 | sizePolicy.setHorizontalStretch(0)
346 | sizePolicy.setVerticalStretch(0)
347 | sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
348 | self.label_9.setSizePolicy(sizePolicy)
349 | self.label_9.setAlignment(QtCore.Qt.AlignCenter)
350 | self.label_9.setObjectName("label_9")
351 | self.gridLayout.addWidget(self.label_9, 2, 1, 1, 1)
352 | self.graphicsView = GraphicsLayoutWidget(self.groupBox_9)
353 | self.graphicsView.setEnabled(True)
354 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
355 | sizePolicy.setHorizontalStretch(0)
356 | sizePolicy.setVerticalStretch(1)
357 | sizePolicy.setHeightForWidth(self.graphicsView.sizePolicy().hasHeightForWidth())
358 | self.graphicsView.setSizePolicy(sizePolicy)
359 | self.graphicsView.setMinimumSize(QtCore.QSize(255, 255))
360 | self.graphicsView.setMaximumSize(QtCore.QSize(255, 255))
361 | self.graphicsView.setObjectName("graphicsView")
362 | self.gridLayout.addWidget(self.graphicsView, 1, 0, 1, 1)
363 | self.graphicsView_3 = GraphicsLayoutWidget(self.groupBox_9)
364 | self.graphicsView_3.setEnabled(True)
365 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
366 | sizePolicy.setHorizontalStretch(0)
367 | sizePolicy.setVerticalStretch(1)
368 | sizePolicy.setHeightForWidth(self.graphicsView_3.sizePolicy().hasHeightForWidth())
369 | self.graphicsView_3.setSizePolicy(sizePolicy)
370 | self.graphicsView_3.setMinimumSize(QtCore.QSize(255, 255))
371 | self.graphicsView_3.setMaximumSize(QtCore.QSize(255, 255))
372 | self.graphicsView_3.setObjectName("graphicsView_3")
373 | self.gridLayout.addWidget(self.graphicsView_3, 1, 2, 1, 1)
374 | self.graphicsView_5 = QtWidgets.QLabel(self.groupBox_9)
375 | self.graphicsView_5.setStyleSheet('border-width: 2px;border-style: solid;border-color: rgb(255, 170, 0);background-color: rgb(180,180,180);')
376 | self.graphicsView_5.setEnabled(True)
377 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
378 | sizePolicy.setHorizontalStretch(0)
379 | sizePolicy.setVerticalStretch(1)
380 | sizePolicy.setHeightForWidth(self.graphicsView_5.sizePolicy().hasHeightForWidth())
381 | self.graphicsView_5.setSizePolicy(sizePolicy)
382 | self.graphicsView_5.setMinimumSize(QtCore.QSize(255, 255))
383 | self.graphicsView_5.setMaximumSize(QtCore.QSize(255, 255))
384 | self.graphicsView_5.setObjectName("graphicsView_5")
385 | self.gridLayout.addWidget(self.graphicsView_5, 3, 2, 1, 1)
386 | self.label_11 = QtWidgets.QLabel(self.groupBox_9)
387 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
388 | sizePolicy.setHorizontalStretch(0)
389 | sizePolicy.setVerticalStretch(0)
390 | sizePolicy.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
391 | self.label_11.setSizePolicy(sizePolicy)
392 | self.label_11.setAlignment(QtCore.Qt.AlignCenter)
393 | self.label_11.setObjectName("label_11")
394 | self.gridLayout.addWidget(self.label_11, 2, 0, 1, 1)
395 | self.label_10 = QtWidgets.QLabel(self.groupBox_9)
396 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
397 | sizePolicy.setHorizontalStretch(0)
398 | sizePolicy.setVerticalStretch(0)
399 | sizePolicy.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
400 | self.label_10.setSizePolicy(sizePolicy)
401 | self.label_10.setAlignment(QtCore.Qt.AlignCenter)
402 | self.label_10.setObjectName("label_10")
403 | self.gridLayout.addWidget(self.label_10, 2, 2, 1, 1)
404 | self.graphicsView_6 = GraphicsLayoutWidget(self.groupBox_9)
405 | self.graphicsView_6.setEnabled(True)
406 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
407 | sizePolicy.setHorizontalStretch(0)
408 | sizePolicy.setVerticalStretch(1)
409 | sizePolicy.setHeightForWidth(self.graphicsView_6.sizePolicy().hasHeightForWidth())
410 | self.graphicsView_6.setSizePolicy(sizePolicy)
411 | self.graphicsView_6.setMinimumSize(QtCore.QSize(255, 255))
412 | self.graphicsView_6.setMaximumSize(QtCore.QSize(255, 255))
413 | self.graphicsView_6.setObjectName("graphicsView_6")
414 | self.gridLayout.addWidget(self.graphicsView_6, 3, 0, 1, 1)
415 | self.gridLayout.setRowStretch(0, 1)
416 | self.gridLayout.setRowStretch(1, 4)
417 | self.gridLayout.setRowStretch(2, 1)
418 | self.gridLayout.setRowStretch(3, 4)
419 | self.verticalLayout_3.addWidget(self.splitter)
420 |
421 | # 比例 控制 splitter 内 控件的比例
422 | self.splitter.addWidget(self.groupBox_9)#'你的第一个子widget'
423 | self.splitter.addWidget(self.groupBox_11)#'你的第二个子widget'
424 | self.splitter.setSizes([960,320])#直接写入数字列表
425 |
426 | self.tabWidget.addTab(self.tab, "")
427 | self.tab_2 = QtWidgets.QWidget()
428 | self.tab_2.setObjectName("tab_2")
429 | self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.tab_2)
430 | self.verticalLayout_8.setObjectName("verticalLayout_8")
431 | self.label_4 = QtWidgets.QLabel(self.tab_2)
432 | self.label_4.setAlignment(QtCore.Qt.AlignCenter)
433 | # self.label_4.setPixmap(QtGui.QPixmap("visualization/depict.jpg"))
434 | self.label_4.setObjectName("label_4")
435 | self.verticalLayout_8.addWidget(self.label_4)
436 | self.tabWidget.addTab(self.tab_2, "")
437 | self.horizontalLayout_4.addWidget(self.tabWidget)
438 | MainWindow.setCentralWidget(self.centralwidget)
439 | self.menubar = QtWidgets.QMenuBar(MainWindow)
440 | self.menubar.setGeometry(QtCore.QRect(0, 0, 1079, 22))
441 | self.menubar.setObjectName("menubar")
442 | self.menu = QtWidgets.QMenu(self.menubar)
443 | self.menu.setObjectName("menu")
444 | MainWindow.setMenuBar(self.menubar)
445 | self.statusbar = QtWidgets.QStatusBar(MainWindow)
446 | self.statusbar.setObjectName("statusbar")
447 | MainWindow.setStatusBar(self.statusbar)
448 | self.actionload = QtWidgets.QAction(MainWindow)
449 | self.actionload.setObjectName("actionload")
450 | self.menu.addSeparator()
451 | self.menu.addAction(self.actionload)
452 | self.menu.addSeparator()
453 | self.menubar.addAction(self.menu.menuAction())
454 |
455 | self.retranslateUi(MainWindow)
456 | self.tabWidget.setCurrentIndex(0)
457 | self.slot_init()
458 | QtCore.QMetaObject.connectSlotsByName(MainWindow)
459 |
    def retranslateUi(self, MainWindow):
        """Apply all user-visible strings to the widgets.

        Generated-UI translation hook: every literal is routed through
        QCoreApplication.translate so Qt's translation machinery can swap
        languages at runtime. Several titles/labels are Chinese UI text.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        # --- Radar configuration panel (titles: "配置" = config, "雷达配置" = radar config) ---
        self.groupBox_11.setTitle(_translate("MainWindow", "配置"))
        self.groupBox_2.setTitle(_translate("MainWindow", "雷达配置"))
        self.label_15.setText(_translate("MainWindow", "最大距离:"))
        self.label_16.setText(_translate("MainWindow", "80m"))
        self.label_36.setText(_translate("MainWindow", "最大速度:"))
        self.label_14.setText(_translate("MainWindow", "3.75cm"))
        self.label_43.setText(_translate("MainWindow", "CLIPort:"))
        self.label_13.setText(_translate("MainWindow", "距离分辨率:"))
        self.label_35.setText(_translate("MainWindow", "2m/s"))
        self.comboBox_7.setToolTip(_translate("MainWindow", "配置文件放到config文件夹下"))
        self.label_45.setText(_translate("MainWindow", "其他待续..."))
        self.label_12.setText(_translate("MainWindow", "配置文件:"))
        self.label_44.setText(_translate("MainWindow", "DataPort:"))
        self.label_17.setText(_translate("MainWindow", "速度分辨率:"))
        self.label_37.setText(_translate("MainWindow", "20m/s"))
        self.pushButton_11.setText(_translate("MainWindow", "send"))
        self.pushButton_12.setText(_translate("MainWindow", "Exit"))
        # --- Capture / recognize panel ("采集/识别"; groupBox_3 = recognize, groupBox_4 = capture) ---
        self.groupBox_7.setTitle(_translate("MainWindow", "采集/识别"))
        self.label_5.setText(_translate("MainWindow", "color:"))
        self.groupBox_3.setTitle(_translate("MainWindow", "识别"))
        self.label_2.setText(_translate("MainWindow", "model:"))
        self.pushButton_15.setText(_translate("MainWindow", "recognize"))
        self.groupBox_4.setTitle(_translate("MainWindow", "采集"))
        self.label_3.setText(_translate("MainWindow", "scene:"))
        self.lineEdit_6.setText(_translate("MainWindow", "chaotic_dataset"))
        # Gesture classes selectable when capturing a dataset.
        self.comboBox_3.setItemText(0, _translate("MainWindow", "Back"))
        self.comboBox_3.setItemText(1, _translate("MainWindow", "Dblclick"))
        self.comboBox_3.setItemText(2, _translate("MainWindow", "Down"))
        self.comboBox_3.setItemText(3, _translate("MainWindow", "Front"))
        self.comboBox_3.setItemText(4, _translate("MainWindow", "Left"))
        self.comboBox_3.setItemText(5, _translate("MainWindow", "Right"))
        self.comboBox_3.setItemText(6, _translate("MainWindow", "Up"))
        self.pushButton.setText(_translate("MainWindow", "capture"))
        self.groupBox.setTitle(_translate("MainWindow", "printlog"))
        # --- Live-display grid ("雷达数据实时显示" = real-time radar display) ---
        self.groupBox_9.setTitle(_translate("MainWindow", "雷达数据实时显示"))
        self.label.setText(_translate("MainWindow", "距离-时间图"))
        self.label_7.setText(_translate("MainWindow", "多普勒-时间图"))
        self.label_8.setText(_translate("MainWindow", "距离-俯仰角度图"))
        self.label_9.setText(_translate("MainWindow", "距离-方位角度图"))
        self.label_11.setText(_translate("MainWindow", "距离-多普勒图"))
        self.label_10.setText(_translate("MainWindow", "手势输出"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "real-time system"))
        # self.label_4.setText(_translate("MainWindow", "TextLabel"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "waiting..."))
        self.menu.setTitle(_translate("MainWindow", "菜单"))
        self.actionload.setText(_translate("MainWindow", "mini"))
        self.actionload.setToolTip(_translate("MainWindow", "mini窗口"))
510 |
511 | def printlog(self,textEdit,string,fontcolor='green'):
512 | textEdit.moveCursor(QtGui.QTextCursor.End)
513 | gettime = time.strftime("%H:%M:%S", time.localtime())
514 | textEdit.append(""+str(gettime)+"-->"+string+"")
515 |
    def slot_init(self):
        """Wire up all signal/slot connections for the control panel."""
        self.printlog(self.textEdit, 'Welcome!',fontcolor='green')
        self.getcolorlist()
        self.comboBox.currentIndexChanged.connect(self.setcolor)
        # NOTE(review): ``arrowClicked`` is not a stock Qt signal — presumably a
        # custom QComboBox subclass emits it when the drop-down arrow is pressed;
        # confirm where the widget class is defined.
        self.comboBox_7.arrowClicked.connect(self.configpath)  # refresh radar config-file list
        self.comboBox_2.arrowClicked.connect(self.modelpath)  # refresh recognition-model list
        # groupBox_3 (recognize) and groupBox_4 (capture) are mutually exclusive;
        # the handler unchecks the sibling box.
        self.groupBox_3.clicked.connect(lambda:self.IsRecognizeorCapture(box_name = 'box_3'))
        self.groupBox_4.clicked.connect(lambda:self.IsRecognizeorCapture(box_name = 'box_4'))

        # Toggle buttons flip the global run flag and swap their own caption.
        self.pushButton_15.clicked.connect(lambda:self.Iscapture(self.pushButton_15,'recognizing','recognize'))
        self.pushButton.clicked.connect(lambda:self.Iscapture(self.pushButton,'capturing','capture'))
527 |
528 | def IsRecognizeorCapture(self,box_name):
529 | if box_name == 'box_3' and self.groupBox_3.isChecked():
530 | self.groupBox_4.setChecked(False)
531 | self.pushButton.setChecked(False)
532 | self.Iscapture(self.pushButton,'capturing','capture')
533 | elif box_name == 'box_4'and self.groupBox_4.isChecked():
534 | self.groupBox_3.setChecked(False)
535 | self.pushButton_15.setChecked(False)
536 | self.Iscapture(self.pushButton_15,'recognizing','recognize')
537 | elif box_name == 'box_3' and self.groupBox_3.isChecked()==False:
538 | gl.set_value('IsRecognizeorCapture',False)
539 | # self.printlog(self.textEdit, 'IsRecognizeorCapture:False',fontcolor='green')
540 | self.graphicsView_5.setPixmap(QtGui.QPixmap("gesture_icons/7.jpg"))
541 | elif box_name == 'box_4' and self.groupBox_4.isChecked()==False:
542 | gl.set_value('IsRecognizeorCapture',False)
543 | # self.printlog(self.textEdit, 'IsRecognizeorCapture:False',fontcolor='green')
544 | self.graphicsView_5.setPixmap(QtGui.QPixmap("gesture_icons/7.jpg"))
545 |
546 | def Iscapture(self,btn,text1,text2):
547 | if btn.isChecked():
548 | gl.set_value('IsRecognizeorCapture',True)
549 | # self.printlog(self.textEdit, 'IsRecognizeorCapture:True',fontcolor='green')
550 | btn.setText(text1)
551 | else:
552 | gl.set_value('IsRecognizeorCapture',False)
553 | self.printlog(self.textEdit, 'IsRecognizeorCapture:False',fontcolor='green')
554 | self.graphicsView_5.setPixmap(QtGui.QPixmap("gesture_icons/7.jpg"))
555 | btn.setText(text2)
556 |
557 | def modelpath(self):
558 | self.comboBox_2.clear()
559 | self.comboBox_2.addItem("--select--")
560 | list = []
561 | if (os.path.exists(modelpathfile)):
562 | files = os.listdir(modelpathfile)
563 | for file in files:
564 | list.append(modelpathfile+'/'+file)
565 | self.comboBox_2.addItems(list)
566 |
567 | def configpath(self):
568 | self.comboBox_7.clear()
569 | self.comboBox_7.addItem("--select--")
570 | list = []
571 | if (os.path.exists(radarRFconfigpathfile)):
572 | files = os.listdir(radarRFconfigpathfile)
573 | for file in files:
574 | list.append(radarRFconfigpathfile+'/'+file)
575 | self.comboBox_7.addItems(list)
576 |
577 | def getcolorlist(self):
578 | values=matplotlib.cm.cmap_d.keys()
579 | self.comboBox.addItem("--select--")
580 | self.comboBox.addItem("customize")
581 | for value in values:
582 | self.comboBox.addItem(value)
583 |
584 | def setcolor(self):
585 | if(self.comboBox.currentText()!='--select--' and self.comboBox.currentText()!=''):
586 | self.printlog(self.textEdit, 'selected color:'+self.comboBox.currentText(),fontcolor='blue')
587 |
588 |
589 |
590 |
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    # Fix: build the UI *before* showing the window so the user never sees a
    # flash of an empty frame (standard pyuic-generated entry-point ordering:
    # setupUi first, then show).
    ui.setupUi(MainWindow)
    MainWindow.show()
    # subWin = Qt_pet()
    sys.exit(app.exec_())
--------------------------------------------------------------------------------
/colortrans.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2020 Sebastian Höfer
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 |
24 | """
25 | This is an example how to use an ImageView with Matplotlib Colormaps (cmap).
26 | The function 'cmapToColormap' converts the Matplotlib format to the internal
27 | format of PyQtGraph that is used in the GradientEditorItem. The function
28 | itself has no dependencies on Matplotlib! Hence the weird if clauses with
29 | 'hasattr' instead of 'isinstance'.
30 | """
31 |
32 |
import collections
import collections.abc

import numpy as np
import matplotlib
import pyqtgraph
from pyqtgraph.Qt import QtCore, QtGui
38 |
39 |
def cmapToColormap(cmap, nTicks=64):
    """
    Converts a Matplotlib cmap to pyqtgraphs colormaps. No dependency on matplotlib.

    Parameters:
        *cmap*: Cmap object. Imported from matplotlib.cm.*
        *nTicks*: Number of ticks to create when dict of functions is used. Otherwise unused.

    Returns:
        List of ``(position, (r, g, b, 255))`` tuples with integer 0-255
        channels — the format pyqtgraph.ColorMap accepts.

    Raises:
        ValueError: if the object is in no recognized cmap format.
    """

    # Case #1: a dictionary with 'red'/'green'/'blue' values as list of ranges (e.g. 'jet')
    # The parameter 'cmap' is a 'matplotlib.colors.LinearSegmentedColormap' instance ...
    if hasattr(cmap, '_segmentdata'):
        colordata = getattr(cmap, '_segmentdata')
        # Fix: the Sequence/Callable aliases were removed from `collections`
        # in Python 3.10 — they live in `collections.abc` since 3.3.
        if ('red' in colordata) and isinstance(colordata['red'], collections.abc.Sequence):

            # collect the color ranges from all channels into one dict to get unique indices
            posDict = {}
            for idx, channel in enumerate(('red', 'green', 'blue')):
                for colorRange in colordata[channel]:
                    posDict.setdefault(colorRange[0], [-1, -1, -1])[idx] = colorRange[2]

            indexList = list(posDict.keys())
            indexList.sort()
            # interpolate missing values (== -1)
            for channel in range(3):  # R,G,B
                startIdx = indexList[0]
                emptyIdx = []
                for curIdx in indexList:
                    if posDict[curIdx][channel] == -1:
                        emptyIdx.append(curIdx)
                    elif curIdx != indexList[0]:
                        for eIdx in emptyIdx:
                            rPos = (eIdx - startIdx) / (curIdx - startIdx)
                            vStart = posDict[startIdx][channel]
                            vRange = (posDict[curIdx][channel] - posDict[startIdx][channel])
                            posDict[eIdx][channel] = rPos * vRange + vStart
                        startIdx = curIdx
                        del emptyIdx[:]
            # scale the 0..1 channel values to 0..255
            for channel in range(3):  # R,G,B
                for curIdx in indexList:
                    posDict[curIdx][channel] *= 255

            rgb_list = [[i, posDict[i]] for i in indexList]

        # Case #2: a dictionary with 'red'/'green'/'blue' values as functions (e.g. 'gnuplot')
        elif ('red' in colordata) and isinstance(colordata['red'], collections.abc.Callable):
            indices = np.linspace(0., 1., nTicks)
            # Fix: np.float was removed in NumPy 1.24; plain `float` is the
            # documented equivalent (it was only ever an alias).
            luts = [np.clip(np.array(colordata[rgb](indices), dtype=float), 0, 1) * 255 \
                    for rgb in ('red', 'green', 'blue')]
            rgb_list = zip(indices, list(zip(*luts)))

    # If the parameter 'cmap' is a 'matplotlib.colors.ListedColormap' instance, with the attributes 'colors' and 'N'
    elif hasattr(cmap, 'colors') and hasattr(cmap, 'N'):
        colordata = getattr(cmap, 'colors')
        # Case #3: a list with RGB values (e.g. 'seismic')
        if len(colordata[0]) == 3:
            indices = np.linspace(0., 1., len(colordata))
            scaledRgbTuples = [(rgbTuple[0] * 255, rgbTuple[1] * 255, rgbTuple[2] * 255) for rgbTuple in colordata]
            rgb_list = zip(indices, scaledRgbTuples)

        # Case #4: a list of tuples with positions and RGB-values (e.g. 'terrain')
        # -> this section is probably not needed anymore!?
        elif len(colordata[0]) == 2:
            rgb_list = [(idx, (vals[0] * 255, vals[1] * 255, vals[2] * 255)) for idx, vals in colordata]

    # Case #X: unknown format or datatype was the wrong object type
    else:
        raise ValueError("[cmapToColormap] Unknown cmap format or not a cmap!")

    # Convert the RGB float values to RGBA integer values
    return list([(pos, (int(r), int(g), int(b), 255)) for pos, (r, g, b) in rgb_list])
110 |
111 |
def pg_get_cmap(cmap):
    """Return a pyqtgraph.ColorMap for *cmap*.

    Args:
        cmap: either the string ``'customize'`` — use the hard-coded 64-entry
            RGBA table below — or a Matplotlib cmap object, which is converted
            via :func:`cmapToColormap`.
    """
    if(cmap=='customize'):
        # 64 tick positions 0/64 .. 63/64, then reversed so they pair with the
        # flipped color table. NOTE(review): the positions never reach 1.0
        # (max is 63/64) — confirm this is intentional.
        position = np.arange(64)
        position = position / 64
        position[0] = 0
        position = np.flip(position)
        # Hand-tuned 64-entry RGBA ramp (blue -> green -> yellow).
        # NOTE(review): entry [28, 129, 222, 255] breaks the otherwise smooth
        # green-channel ramp (neighbours run 164 -> 173) — possibly a typo for
        # 169; confirm against the intended palette.
        colors = [[62, 38, 168, 255], [63, 42, 180, 255], [65, 46, 191, 255], [67, 50, 202, 255], [69, 55, 213, 255],
                  [70, 60, 222, 255], [71, 65, 229, 255], [70, 71, 233, 255], [70, 77, 236, 255], [69, 82, 240, 255],
                  [68, 88, 243, 255],
                  [68, 94, 247, 255], [67, 99, 250, 255], [66, 105, 254, 255], [62, 111, 254, 255], [56, 117, 254, 255],
                  [50, 123, 252, 255],
                  [47, 129, 250, 255], [46, 135, 246, 255], [45, 140, 243, 255], [43, 146, 238, 255], [39, 150, 235, 255],
                  [37, 155, 232, 255],
                  [35, 160, 229, 255], [31, 164, 225, 255], [28, 129, 222, 255], [24, 173, 219, 255], [17, 177, 214, 255],
                  [7, 181, 208, 255],
                  [1, 184, 202, 255], [2, 186, 195, 255], [11, 189, 188, 255], [24, 191, 182, 255], [36, 193, 174, 255],
                  [44, 195, 167, 255],
                  [49, 198, 159, 255], [55, 200, 151, 255], [63, 202, 142, 255], [74, 203, 132, 255], [88, 202, 121, 255],
                  [102, 202, 111, 255],
                  [116, 201, 100, 255], [130, 200, 89, 255], [144, 200, 78, 255], [157, 199, 68, 255], [171, 199, 57, 255],
                  [185, 196, 49, 255],
                  [197, 194, 42, 255], [209, 191, 39, 255], [220, 189, 41, 255], [230, 187, 45, 255], [239, 186, 53, 255],
                  [248, 186, 61, 255],
                  [254, 189, 60, 255], [252, 196, 57, 255], [251, 202, 53, 255], [249, 208, 50, 255], [248, 214, 46, 255],
                  [246, 220, 43, 255],
                  [245, 227, 39, 255], [246, 233, 35, 255], [246, 239, 31, 255], [247, 245, 27, 255], [249, 251, 20, 255]]
        # Flip the colors to match the flipped positions above.
        colors = np.flip(colors, axis=0)
        color_map = pyqtgraph.ColorMap(position, colors)
        return color_map
    else:
        # Matplotlib cmap object: convert to (positions, RGBA) pairs.
        pos, rgba_colors = zip(*cmapToColormap(cmap))
        # Set the colormap
        pgColormap = pyqtgraph.ColorMap(pos, rgba_colors)
        return pgColormap
147 |
148 |
149 |
# Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    # Demo/self-test: render random noisy data through a Matplotlib colormap
    # converted by cmapToColormap().
    import sys
    import matplotlib.cm

    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # NOTE(review): QtGui.QApplication is PyQt4/PySide1-era API; on Qt5+
        # bindings this class lives in QtWidgets — confirm the pyqtgraph.Qt
        # shim in use still aliases it here.
        app = QtGui.QApplication([])

        # Create window with ImageView widget
        win = pyqtgraph.GraphicsLayoutWidget()
        win.resize(800,800)
        win.show()

        imv = pyqtgraph.ImageItem()

        view = win.addViewBox()
        view.addItem(imv)
        win.setCentralWidget(view)
        win.show()
        win.setWindowTitle('Example: Matplotlib colormaps in pyqtgraph ImageView')

        # Create random data set with noisy signals
        data = np.random.normal(size=(200, 200))
        gradient = np.linspace(0., 10., 200)
        data += gradient[:, np.newaxis]


        # Convert a matplotlib colormap into a list of (tickmark, (r,g,b)) tuples
        pos, rgba_colors = zip(*cmapToColormap(matplotlib.cm.coolwarm))
        # Set the colormap
        pgColormap = pyqtgraph.ColorMap(pos, rgba_colors)
        imv.setLookupTable(pgColormap.getLookupTable())

        # Display the data
        imv.setImage(data)

        QtGui.QApplication.instance().exec_()
--------------------------------------------------------------------------------
/config/IWR1843_cfg.cfg:
--------------------------------------------------------------------------------
1 | % ***************************************************************
2 | % Created for SDK ver:03.05
3 | % Created using Visualizer ver:3.5.0.0
4 | % Frequency:77
5 | % Platform:xWR18xx
6 | % Scene Classifier:best_range
7 | % Azimuth Resolution(deg):15
8 | % Range Resolution(m):0.47
9 | % Maximum unambiguous Range(m):50
10 | % Maximum Radial Velocity(m/s):18.28
11 | % Radial velocity resolution(m/s):2.29
12 | % Frame Duration(msec):50
13 | % RF calibration data:None
14 | % Range Detection Threshold (dB):15
15 | % Doppler Detection Threshold (dB):15
16 | % Range Peak Grouping:enabled
17 | % Doppler Peak Grouping:enabled
18 | % Static clutter removal:disabled
19 | % Angle of Arrival FoV: Full FoV
20 | % Range FoV: Full FoV
21 | % Doppler FoV: Full FoV
22 | % ***************************************************************
23 | sensorStop
24 | flushCfg
25 |
26 | # 1:frame based chirps, 2:continuous chirp, 3:adv frame config [1/3]
27 | dfeDataOutputMode 1
28 |
29 | #* <0(cascading)>[15][x][0]
30 | #***[para 1 2] rx/tx Channel En <0x1111 means four devices enable>
31 | channelCfg 15 5 0
32 |
33 | #* [2]
34 | # [1/2]
35 | #[para 1] bit of one data
36 | #[para 2] complex or real
37 | adcCfg 2 1
38 |
39 | #* [-1]
40 | # [0]
41 | # [1]
42 | # [1]
43 | # [1]
44 | adcbufCfg -1 0 1 1 1
45 |
46 | # ===========================================================================
47 |
48 | #*
49 | # [0] [0]
50 | # ***[para 8]
51 | # ***[para 10]Samples num
52 | # ***[para 11]Samples rate
53 | # ***[para 12]CornerFreq1 <0:175kHz 1:235kHz 2:350kHz 3:700kHz>
54 | # ***[para 13]CornerFreq2 <0:175kHz 1:235kHz 2:350kHz 3:700kHz>
55 | #
56 | #
57 | #
58 | #
59 | profileCfg 0 77 8 7 18.64 0 0 30 1 133 12499 0 0 30
60 |
61 | #*
62 | # [0] [0] [0]
63 | # [0]
64 | chirpCfg 0 0 0 0 0 0 0 1
65 | chirpCfg 1 1 0 0 0 0 0 4
66 | #*
67 | # [should be 4x]
68 | # [1]
69 | #***[para 3]number of loops(num of chirps in a frame)
70 | #***[para 4]num of frames range(0~65535)--0 means infinite
71 | #***[para 5]frame periodicity in ms (float values allowed)
72 | frameCfg 0 1 16 0 50 1 0
73 |
74 | # ===========================================================================
75 |
76 | #* [0]
77 | lowPower 0 0
78 |
79 | # [-1]
80 | #
81 | #
82 | # [0]
83 | # [0]
84 | # [0]
85 | # [0]
86 | guiMonitor -1 1 1 0 0 0 1
87 |
# cfarCfg must be issued twice: second argument 0 configures range CFAR, 1 configures Doppler CFAR
89 | cfarCfg -1 0 2 8 4 3 0 15 1
90 | cfarCfg -1 1 0 4 2 3 1 15 1
91 |
92 | # <> [0] ...
93 | multiObjBeamForming -1 1 0.5
94 | # <> [0]
95 | clutterRemoval -1 0
96 | # <> [0] ...
97 | calibDcRangeSig -1 0 -5 8 256
98 | # <> [0]
99 | extendedMaxVelocity -1 0
100 |
101 | #* [-1] [0]
102 | # [1] [0]
103 | lvdsStreamCfg -1 0 1 0
104 |
105 | #
106 | compRangeBiasAndRxChanPhase 0.0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0
107 | # [0] ...
108 | measureRangeBiasAndRxChanPhase 0 1.5 0.2
109 |
110 | CQRxSatMonitor 0 3 4 31 0
111 | CQSigImgMonitor 0 65 4
112 | # [0] [0]
113 | analogMonitor 0 0
114 |
115 | # [-1]
116 | #
117 | aoaFovCfg -1 -90 90 -90 90
118 |
119 | # [-1] <0:range,1:Doppler>
120 | #
121 | cfarFovCfg -1 0 0 49.99
122 | cfarFovCfg -1 1 -18.28 18.28
123 |
124 | calibData 0 0 0
125 |
126 | sensorStart
127 |
--------------------------------------------------------------------------------
/config/IWR6843_cfg.cfg:
--------------------------------------------------------------------------------
1 | % ***************************************************************
2 | % Created for SDK ver:03.04
3 | % Created using Visualizer ver:3.5.0.0
4 | % Frequency:60
5 | % Platform:xWR68xx
6 | % Scene Classifier:best_range_res
7 | % Azimuth Resolution(deg):15
8 | % Range Resolution(m):0.044
9 | % Maximum unambiguous Range(m):9.02
10 | % Maximum Radial Velocity(m/s):1
11 | % Radial velocity resolution(m/s):0.13
12 | % Frame Duration(msec):100
13 | % RF calibration data:None
14 | % Range Detection Threshold (dB):15
15 | % Doppler Detection Threshold (dB):15
16 | % Range Peak Grouping:enabled
17 | % Doppler Peak Grouping:enabled
18 | % Static clutter removal:disabled
19 | % Angle of Arrival FoV: Full FoV
20 | % Range FoV: Full FoV
21 | % Doppler FoV: Full FoV
22 | % ***************************************************************
23 | sensorStop
24 | flushCfg
25 | dfeDataOutputMode 1
26 | channelCfg 15 7 0
27 | adcCfg 2 1
28 | adcbufCfg -1 0 1 1 1
29 | profileCfg 0 60 200 7 40 0 0 100 1 64 2000 0 0 158
30 | chirpCfg 0 0 0 0 0 0 0 1
31 | chirpCfg 1 1 0 0 0 0 0 2
32 | chirpCfg 2 2 0 0 0 0 0 4
33 | frameCfg 0 2 64 0 110 1 0
34 | lowPower 0 0
35 | guiMonitor -1 1 1 0 0 0 1
36 | cfarCfg -1 0 2 8 4 3 0 20 1
37 | cfarCfg -1 1 0 4 2 3 1 15 1
38 | multiObjBeamForming -1 1 0.5
39 | clutterRemoval -1 1
40 | calibDcRangeSig -1 0 -5 8 256
41 | extendedMaxVelocity -1 0
42 | bpmCfg -1 0 0 1
43 | lvdsStreamCfg -1 0 1 0
44 | compRangeBiasAndRxChanPhase 0.0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0
45 | measureRangeBiasAndRxChanPhase 0 1.5 0.2
46 | CQRxSatMonitor 0 3 5 121 0
47 | CQSigImgMonitor 0 127 4
48 | analogMonitor 0 0
49 | aoaFovCfg -1 -90 90 -90 90
50 | cfarFovCfg -1 0 0 0.5
51 | cfarFovCfg -1 1 -3 3.00
52 | calibData 0 0 0
53 | sensorStart
54 |
--------------------------------------------------------------------------------
/dsp/ZoomFFT.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
14 | from matplotlib import pyplot as plt
15 | from numpy import pi
16 | from numpy.fft import fft, fftfreq, fftshift
17 | from scipy import signal
18 | import logging
19 | import sys
20 |
21 |
class ZoomFFT:
    """Implementation of the Zoom Fast Fourier Transform (ZoomFFT).

    The zoom FFT is a signal processing technique used to analyse a portion
    of a spectrum at high resolution. The steps to apply the zoom FFT to a
    region of interest are as follows:

    1. Frequency translate to shift the frequency range of interest down to
       near 0 Hz (DC)
    2. Low pass filter to prevent aliasing when subsequently sampled at a
       lower sample rate
    3. Re-sample at a lower rate
    4. FFT the re-sampled data (multiple blocks of data are needed to have an
       FFT of the same length)

    The resulting spectrum has a much smaller resolution bandwidth, compared
    to an FFT of non-translated data.
    """

    def __init__(self, low_freq, high_freq, fs, signal=None):
        """Initialize the ZoomFFT class.

        Args:
            low_freq (int): Lower frequency limit of the zoom band.
            high_freq (int): Upper frequency limit of the zoom band.
            fs (int): Sampling frequency.
            signal (np.ndarray): Optional signal to perform the ZoomFFT on.
                May also be supplied later via set_signal().

        Raises:
            Exception: If the requested band does not fit within [0, fs].
        """
        self.low_freq = low_freq
        self.high_freq = high_freq
        self.fs = fs

        if (low_freq < 0) or (high_freq > fs) or ((high_freq - low_freq) > fs):
            raise Exception("invalid inputs. Program Terminated! ")

        # Fix: the original called len(signal) unconditionally, which raised
        # TypeError for the default signal=None.
        if signal is not None and len(signal):
            self.signal = signal
            self.length = len(signal)

    def set_signal(self, signal):
        """Sets the given signal as a member variable of the class.

        e.g. zoom.set_signal(zoom.sinewave(a, b) + zoom.sinewave(d, e))

        Args:
            signal (np.ndarray): Signal to perform the ZoomFFT on.
        """
        self.signal = signal
        # Fix: keep the cached length consistent with the stored signal so
        # the FFT helpers work even when __init__ received no signal.
        self.length = len(signal)

    def sinewave(self, f, length, amplitude=1):
        """Generates a sine wave which can be used as part of the signal.

        Also records `length` as the working signal length.

        Args:
            f (int): Frequency of the sine wave.
            length (int): Number of data points in the sine wave.
            amplitude (int): Amplitude of the sine wave.

        Returns:
            np.ndarray: Generated sine wave with the given parameters.
        """
        self.length = length
        x = amplitude * np.sin(2 * pi * f / self.fs * np.arange(length))
        return x

    def compute_fft(self):
        """Computes the Fast Fourier Transform (FFT) of the signal.

        Returns:
            np.ndarray: A frequency-shifted, unscaled FFT magnitude of the signal.
        """
        try:
            X = fft(self.signal)
            X = np.abs(fftshift(X))  # unscaled magnitude, DC bin centered
            return X
        except AttributeError:
            # Fix: a missing self.signal raises AttributeError, not NameError
            # as the original handler assumed (it could never fire).
            print("signal not defined. Program terminated!")
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise

    def plot_fft(self, d=None):
        """Plots the Fast Fourier Transform (FFT) of the signal.

        Args:
            d (int): Sample spacing (inverse of the sampling rate).
        """
        try:
            d = 1 / self.fs if d is None else d
            X = self.compute_fft()
            freq = fftfreq(self.length, d)

            # Frequency resolution of the plain FFT; cached as the reference
            # bin width for the zoom FFT.
            self.original_sample_range = 1 / (self.length * d)

            fig1, ax1 = plt.subplots()
            ax1.stem(fftshift(freq), X / self.length)
            ax1.set_xlabel('Frequency (Hz)', fontsize=12)
            ax1.set_ylabel('Magnitude', fontsize=12)
            ax1.set_title('FFT Two-sided spectrum', fontsize=12)
            ax1.grid()

            plt.show()
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise

    def _sample_range(self):
        """Frequency bin width (Hz) of the plain FFT of the stored signal.

        Uses the value cached by plot_fft() when available; otherwise derives
        it directly as fs / length. (Fix: the original required plot_fft() to
        be called before compute_zoomfft() could run at all.)
        """
        return getattr(self, 'original_sample_range', self.fs / self.length)

    def compute_zoomfft(self, resample_number=None):
        """Computes the Zoom Fast Fourier Transform (ZoomFFT) of the signal.

        Args:
            resample_number (int): Number of samples in the resampled signal.
                Defaults to one sample per FFT bin of the band of interest.

        Returns:
            Xd (np.ndarray): A frequency-shifted, unscaled ZoomFFT magnitude.
            bw_factor (int): Bandwidth (decimation) factor.
            fftlen (int): Length of the ZoomFFT output.
            Ld (float): Decimated signal length, used to scale the spectrum.
            F (np.ndarray): Frequency axis (Hz) matching Xd.
        """
        try:
            bw_of_interest = self.high_freq - self.low_freq

            if self.length % bw_of_interest != 0:
                logging.warning("length of signal should be divisible by bw_of_interest. Zoom FFT Spectrum may distort!")
                input("Press Enter to continue...")

            # Center of the zoom band; the signal is mixed down by this much.
            fc = (self.low_freq + self.high_freq) / 2
            # NOTE(review): uint8 silently caps fs/bw_of_interest at 255 —
            # confirm the expected decimation factors stay below that.
            bw_factor = np.floor(self.fs / bw_of_interest).astype(np.uint8)

            # Mix the signal down to DC.
            ind_vect = np.arange(self.length)
            y = self.signal * np.exp(-1j * 2 * pi * ind_vect * fc / self.fs)

            resample_number = bw_of_interest / self._sample_range() if resample_number is None else resample_number

            resample_range = bw_of_interest / resample_number

            if resample_range != self._sample_range():
                logging.warning("resample resolution != original sample resolution. Zoom FFT Spectrum may distort!")
                input("Press Enter to continue...")

            # Fix: np.int was removed in NumPy 1.24; use the builtin int.
            xd = signal.resample(y, int(resample_number))

            fftlen = len(xd)
            Xd = fft(xd)
            Xd = np.abs(fftshift(Xd))  # unscaled magnitude

            Ld = self.length / bw_factor
            fsd = self.fs / bw_factor
            F = fc + fsd / fftlen * np.arange(fftlen) - fsd / 2
            return Xd, bw_factor, fftlen, Ld, F
        except AttributeError:
            # Fix: a missing self.signal/self.length raises AttributeError,
            # not NameError as the original handler assumed.
            print("signal not defined. Program terminated!")
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise

    def plot_zoomfft(self, resample_number=None):
        """Plots the Zoom Fast Fourier Transform (ZoomFFT) of the signal.

        Args:
            resample_number (int): Number of samples in the resampled signal.
        """
        try:
            bw_of_interest = self.high_freq - self.low_freq
            resample_number = bw_of_interest / self._sample_range() if resample_number is None else resample_number
            Xd, bw_factor, fftlen, Ld, F = self.compute_zoomfft(resample_number)

            fig1, ax1 = plt.subplots()

            ax1.stem(F, Xd / Ld, linefmt='C1-.', markerfmt='C1s')
            ax1.grid()
            ax1.set_xlabel('Frequency (Hz)', fontsize=12)
            ax1.set_ylabel('Magnitude', fontsize=12)
            ax1.set_title('Zoom FFT Spectrum. Mixer Approach.', fontsize=12)
            fig1.subplots_adjust(hspace=0.35)
            plt.show()
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
209 |
--------------------------------------------------------------------------------
/dsp/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | from .angle_estimation import *
14 | from .cfar import *
15 | from .compensation import *
16 | from .doppler_processing import *
17 | from .range_processing import *
18 | from .utils import *
19 | from .noise_removal import *
20 | from .music import *
--------------------------------------------------------------------------------
/dsp/cfar.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
14 | from scipy.ndimage import convolve1d
15 |
16 | """ Various cfar algorithm types
17 |
18 | From https://www.mathworks.com/help/phased/ug/constant-false-alarm-rate-cfar-detectors.html
19 | |-----------------------------------------------------------------------------------------------------------|
20 | | Algorithm | Typical Usage |
21 | |-----------------------------------------------------------------------------------------------------------|
22 | | Cell-averaging CFAR | Most situations |
23 | | Greatest-of cell-averaging CFAR | When it is important to avoid false alarms at the edge of clutter |
24 | | Smallest-of cell-averaging CFAR | When targets are closely located |
25 | | Order statistic CFAR | Compromise between greatest-of and smallest-of cell averaging |
26 | |-----------------------------------------------------------------------------------------------------------|
27 |
28 | """
29 |
30 |
def ca(x, *argv, **kwargs):
    """Cell-Averaging CFAR (CA-CFAR) peak detection.

    Thin wrapper around ca_: computes the CA-CFAR threshold and returns the
    boolean mask of samples that exceed it.

    Args:
        x (~numpy.ndarray): Signal.
        *argv: Forwarded to mmwave.dsp.cfar.ca_
        **kwargs: Forwarded to mmwave.dsp.cfar.ca_

    Returns:
        ~numpy.ndarray: Boolean array of detected peaks in x.
    """
    if isinstance(x, list):
        x = np.array(x)
    limit, _ = ca_(x, *argv, **kwargs)
    return x > limit
67 |
68 |
def ca_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000):
    """Cell-Averaging CFAR (CA-CFAR) threshold computation.

    Estimates the local noise floor at every cell as the mean of the
    `noise_len` cells on each side of the cell under test (skipping
    `guard_len` guard cells on both sides), then adds `l_bound` to obtain the
    detection threshold.

    Args:
        x (~numpy.ndarray): Signal.
        guard_len (int): Number of samples adjacent to the CUT that are ignored.
        noise_len (int): Number of samples adjacent to the guard padding that
            are factored into the calculation.
        mode (str): Edge-cell handling, e.g. 'wrap' or 'constant'.
        l_bound (float or int): Additive lower bound for the peak threshold.

    Returns:
        Tuple [ndarray, ndarray]:
            1. Detection threshold (noise floor plus l_bound).
            2. Raw noise floor estimate.
    """
    if isinstance(x, list):
        x = np.array(x)
    assert type(x) == np.ndarray

    # Averaging window: 1/(2*noise_len) over the two noise regions, zero over
    # the guard cells and the cell under test itself.
    window_size = 1 + 2 * guard_len + 2 * noise_len
    weights = np.ones(window_size, dtype=x.dtype) / (2 * noise_len)
    weights[noise_len:window_size - noise_len] = 0

    floor = convolve1d(x, weights, mode=mode)
    return floor + l_bound, floor
113 |
114 |
def caso(x, *argv, **kwargs):
    """Cell-Averaging Smallest-Of CFAR (CASO-CFAR) peak detection.

    Thin wrapper around caso_: computes the CASO threshold and returns the
    boolean mask of samples that exceed it.

    Args:
        x (~numpy.ndarray): Signal.
        *argv: Forwarded to mmwave.dsp.cfar.caso_
        **kwargs: Forwarded to mmwave.dsp.cfar.caso_

    Returns:
        ~numpy.ndarray: Boolean array of detected peaks in x.
    """
    if isinstance(x, list):
        x = np.array(x)
    limit, _ = caso_(x, *argv, **kwargs)
    return x > limit
151 |
152 |
def caso_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000):
    """Cell-Averaging Smallest-Of CFAR (CASO-CFAR) threshold computation.

    The noise floor at each cell is the smaller of the two one-sided window
    averages (before/after the CUT); `l_bound` is added to form the threshold.

    Args:
        x (~numpy.ndarray): Signal.
        guard_len (int): Number of samples adjacent to the CUT that are ignored.
        noise_len (int): Number of samples adjacent to the guard padding that
            are factored into the calculation.
        mode (str): Edge-cell handling, 'wrap' or 'constant'.
        l_bound (float or int): Additive lower bound for the peak threshold.

    Returns:
        Tuple [ndarray, ndarray]:
            1. Detection threshold (noise floor plus l_bound).
            2. Raw noise floor estimate.

    Raises:
        ValueError: If `mode` is neither 'wrap' nor 'constant'.
    """
    if isinstance(x, list):
        x = np.array(x)

    left_win, right_win = _cfar_windows(x, guard_len, noise_len, mode)

    # Convert the one-sided sums into one-sided averages.
    left_win = left_win / noise_len
    right_win = right_win / noise_len

    if mode == 'wrap':
        floor = np.minimum(left_win, right_win)
    elif mode == 'constant':
        # Near each edge only one window covers real samples; use it alone.
        edge = guard_len + noise_len
        floor = np.minimum(left_win, right_win)
        floor[:edge] = right_win[:edge]
        floor[-edge:] = left_win[-edge:]
    else:
        raise ValueError(f'Mode {mode} is not a supported mode')

    return floor + l_bound, floor
206 |
207 |
def cago(x, *argv, **kwargs):
    """Cell-Averaging Greatest-Of CFAR (CAGO-CFAR) peak detection.

    Thin wrapper around cago_: computes the CAGO threshold and returns the
    boolean mask of samples that exceed it.

    Args:
        x (~numpy.ndarray): Signal.
        *argv: Forwarded to mmwave.dsp.cfar.cago_
        **kwargs: Forwarded to mmwave.dsp.cfar.cago_

    Returns:
        ~numpy.ndarray: Boolean array of detected peaks in x.
    """
    if isinstance(x, list):
        x = np.array(x)
    limit, _ = cago_(x, *argv, **kwargs)
    return x > limit
244 |
245 |
def cago_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000):
    """Cell-Averaging Greatest-Of CFAR (CAGO-CFAR) threshold computation.

    The noise floor at each cell is the greater of the two one-sided window
    averages (before/after the CUT); `l_bound` is added to form the threshold.

    Args:
        x (~numpy.ndarray): Signal.
        guard_len (int): Number of samples adjacent to the CUT that are ignored.
        noise_len (int): Number of samples adjacent to the guard padding that
            are factored into the calculation.
        mode (str): Edge-cell handling, 'wrap' or 'constant'.
        l_bound (float or int): Additive lower bound for the peak threshold.

    Returns:
        Tuple [ndarray, ndarray]:
            1. Detection threshold (noise floor plus l_bound).
            2. Raw noise floor estimate.

    Raises:
        ValueError: If `mode` is neither 'wrap' nor 'constant'.
    """
    if isinstance(x, list):
        x = np.array(x)

    left_win, right_win = _cfar_windows(x, guard_len, noise_len, mode)

    # Convert the one-sided sums into one-sided averages.
    left_win = left_win / noise_len
    right_win = right_win / noise_len

    if mode == 'wrap':
        floor = np.maximum(left_win, right_win)
    elif mode == 'constant':
        # Near each edge only one window covers real samples; use it alone.
        edge = guard_len + noise_len
        floor = np.maximum(left_win, right_win)
        floor[:edge] = right_win[:edge]
        floor[-edge:] = left_win[-edge:]
    else:
        raise ValueError(f'Mode {mode} is not a supported mode')

    return floor + l_bound, floor
299 |
300 |
def os(x, *argv, **kwargs):
    """Ordered-Statistic CFAR (OS-CFAR) peak detection.

    Thin wrapper around os_: computes the OS threshold and returns the
    boolean mask of samples that exceed it.

    Args:
        x (~numpy.ndarray): Noisy array (log values) to run CFAR on.
        *argv: Forwarded to mmwave.dsp.cfar.os_
        **kwargs: Forwarded to mmwave.dsp.cfar.os_

    Returns:
        ~numpy.ndarray: Boolean array of detected peaks in x.
    """
    if isinstance(x, list):
        x = np.array(x)
    limit, _ = os_(x, *argv, **kwargs)
    return x > limit
328 |
329 |
def os_(x, guard_len=0, noise_len=8, k=12, scale=1.0):
    """Ordered-Statistic CFAR (OS-CFAR) threshold computation.

    For every cell under test (CUT) this gathers `noise_len` reference cells
    on each side (wrapping circularly at the array ends), takes the k-th
    smallest value of the combined window as the noise floor, and scales it
    to produce the threshold.

    NOTE(review): the rolling left window appends index (cut - 1) each step,
    so left-hand guard cells are not skipped when guard_len > 0 — confirm
    this matches the intended reference behavior.

    Args:
        x (~numpy.ndarray): Noisy array (log values) to run CFAR on.
        guard_len (int): Number of samples adjacent to the CUT that are ignored.
        noise_len (int): Number of reference samples taken on each side.
        k (int): Ordered-statistic rank to sample from.
        scale (float): Scaling factor applied to the noise floor.

    Returns:
        Tuple [ndarray, ndarray]:
            1. Detection threshold per cell (float32).
            2. Raw noise floor per cell.
    """
    if isinstance(x, list):
        x = np.array(x, dtype=np.uint32)

    n = len(x)
    noise_floor = np.zeros(n)
    threshold = np.zeros(n, dtype=np.float32)

    # Seed the rolling reference windows so the first loop step positions
    # them correctly for CUT index 0.
    left = list(range(n - noise_len - guard_len - 1, n - guard_len - 1))
    right = list(range(guard_len, guard_len + noise_len))

    for cut in range(n):
        # Slide both windows one cell to the right (with wrap-around).
        left.pop(0)
        left.append((cut - 1) % n)
        right.pop(0)
        right.append((cut + guard_len + noise_len) % n)

        window = np.concatenate((x[left], x[right]))
        window.partition(k)  # k-th smallest lands at position k
        noise_floor[cut] = window[k]
        threshold[cut] = noise_floor[cut] * scale

    return threshold, noise_floor
382 |
383 |
384 | def _cfar_windows(x, guard_len, noise_len, mode):
385 | if type(x) != np.ndarray:
386 | raise TypeError(f'Expected array-like input got {type(x)}')
387 |
388 | # Create kernels
389 | r_kernel = np.zeros(1 + (2 * guard_len) + (2 * noise_len), dtype=x.dtype)
390 | r_kernel[:noise_len] = 1
391 | l_kernel = r_kernel[::-1]
392 |
393 | # Do initial convolutions
394 | l_window = convolve1d(x, l_kernel, mode=mode)
395 | r_window = convolve1d(x, r_kernel, mode=mode)
396 |
397 | return l_window, r_window
398 |
399 |
# Index-wrapping helpers shared by the peak-grouping routines below.
# Fix: converted from lambda assignments (PEP 8 E731) to named functions so
# they get real tracebacks and docstrings; names and signatures unchanged.
def WRAP_UP_LIST_IDX(x, total):
    """Wrap index x upward into [0, total): adds total to negative indices."""
    return x if x >= 0 else x + total


def WRAP_DN_LIST_IDX(x, total):
    """Wrap index x downward into [0, total): subtracts total when x >= total."""
    return x if x < total else x - total


def WRAP_DOPPLER_IDX(x, num_doppler_bins):
    """Wrap a doppler index via bitmask; num_doppler_bins must be a power of two."""
    return np.bitwise_and(x, num_doppler_bins - 1)


def DOPPLER_IDX_TO_SIGNED(idx, fft_size):
    """Map an unsigned FFT bin index to its signed doppler index."""
    return idx if idx < fft_size // 2 else idx - fft_size
404 |
405 |
def peak_grouping(obj_raw,
                  det_matrix,
                  num_doppler_bins,
                  max_range_idx,
                  min_range_idx,
                  group_in_doppler_direction,
                  group_in_range_direction):
    """Performs peak grouping on a detection Range/Doppler matrix.

    Groups neighboring peaks into one according to the two direction flags.
    For each detected peak, the peak is kept only if it is at least as large
    as every selected neighbor in a 3x3 kernel read from the detection
    matrix (neighbors are read regardless of whether they were themselves
    CFAR-detected).

    Args:
        obj_raw (np.ndarray): (num_detected_objects, 3) CFAR detections as
            (range_idx, doppler_idx, peak_val) rows.
        det_matrix (np.ndarray): Range-doppler profile. NOTE(review): indexed
            below as a flat row-major buffer of num_range_bins *
            num_doppler_bins samples — confirm callers pass it flattened.
        num_doppler_bins (int): Number of doppler bins.
        max_range_idx (int): Max range index of detected objects.
        min_range_idx (int): Min range index of detected objects.
        group_in_doppler_direction (int): Flag to group along doppler.
        group_in_range_direction (int): Flag to group along range.

    Returns:
        Tuple [int, np.ndarray]: Number of grouped detections and the
        (num_obj_out, 3) array of grouped objects. When both direction flags
        are 0, only the filtered object array is returned (kept as-is for
        backward compatibility with existing callers).
    """

    num_detected_objects = obj_raw.shape[0]

    kernel = np.empty([9])

    if (group_in_doppler_direction == 1) and (group_in_range_direction == 1):
        # Grouping both in range and doppler direction: full 3x3 neighborhood.
        start_ind, step_ind, end_ind = 0, 1, 8
    elif (group_in_doppler_direction == 0) and (group_in_range_direction == 1):
        # Grouping only in range direction: middle column of the kernel.
        start_ind, step_ind, end_ind = 1, 3, 7
    elif (group_in_doppler_direction == 1) and (group_in_range_direction == 0):
        # Grouping only in doppler direction: middle row of the kernel.
        start_ind, step_ind, end_ind = 3, 1, 5
    else:
        # No grouping: return every detection inside the requested range span.
        # Fix: the original used the Python `and` operator on boolean arrays
        # (raises ValueError) and the undefined names RANGEIDX/DOPPLERIDX;
        # columns are (0=range, 1=doppler, 2=peak) as used in the loop below.
        in_range = (obj_raw[:, 0] <= max_range_idx) & (obj_raw[:, 0] > min_range_idx)
        obj_out = obj_raw[in_range]
        # astype keeps the bitmask valid even for float-typed obj_raw.
        obj_out[:, 1] = np.bitwise_and(obj_out[:, 1].astype(np.int64),
                                       num_doppler_bins - 1)

        return obj_out

    # Grouping path: keep a detection only if it is the local maximum of its
    # (masked) 3x3 neighborhood in the detection matrix.
    rows = []
    for i in range(num_detected_objects):
        detected_obj_flag = 0
        # Fix: force integer indices so the det_matrix slicing below cannot
        # fail when obj_raw arrives as a float array.
        range_idx = int(obj_raw[i, 0])
        doppler_idx = int(obj_raw[i, 1])
        peak_val = obj_raw[i, 2]

        if (range_idx <= max_range_idx) and (range_idx >= min_range_idx):
            detected_obj_flag = 1

            # Fill the local 3x3 kernel from the (flat) detection matrix.
            start_idx = (range_idx - 1) * num_doppler_bins
            temp_ptr = det_matrix[start_idx:]
            row_start = 0
            row_end = 2

            if range_idx == min_range_idx:
                # No row above: zero the top kernel row.
                start_idx = range_idx * num_doppler_bins
                temp_ptr = det_matrix[start_idx:]
                row_start = 1
                kernel[0] = 0
                kernel[1] = 0
                kernel[2] = 0
            elif range_idx == max_range_idx:
                # No row below: zero the bottom kernel row.
                row_end = 1
                kernel[6] = 0
                kernel[7] = 0
                kernel[8] = 0

            for j in range(row_start, row_end + 1):
                for k in range(3):
                    # Doppler wraps circularly.
                    temp_idx = doppler_idx + (k - 1)

                    if temp_idx < 0:
                        temp_idx += num_doppler_bins
                    elif temp_idx >= num_doppler_bins:
                        temp_idx -= num_doppler_bins

                    kernel[j * 3 + k] = temp_ptr[temp_idx]

                temp_ptr = temp_ptr[num_doppler_bins:]

            # The object under test sits at kernel index 4; reject it if any
            # selected neighbor is strictly larger.
            for k in range(start_ind, end_ind + 1, step_ind):
                if kernel[k] > kernel[4]:
                    detected_obj_flag = 0

        if detected_obj_flag == 1:
            # Fix: the original pre-allocated np.zeros((0, 3)) and wrote into
            # it by index, which raised IndexError on the first accepted
            # detection; accumulate rows and build the array at the end.
            rows.append((range_idx,
                         DOPPLER_IDX_TO_SIGNED(doppler_idx, num_doppler_bins),
                         peak_val))

        # NOTE(review): MAX_OBJ_OUT is not defined in this module; it is
        # presumably exported by a sibling dsp module — confirm before use.
        if len(rows) >= MAX_OBJ_OUT:
            break

    num_obj_out = len(rows)
    obj_out = np.asarray(rows, dtype=np.float64).reshape(num_obj_out, 3)
    return num_obj_out, obj_out
526 |
527 |
528 | def peak_grouping_qualified(obj_raw,
529 | num_doppler_bins,
530 | max_range_idx,
531 | min_range_idx,
532 | group_in_doppler_direction,
533 | group_in_range_direction):
534 | """Performs peak grouping on list of CFAR detected objects.
535 |
536 | The function groups neighboring peaks into one. The grouping is done according to two input flags:
537 | group_in_doppler_direction and group_in_doppler_direction. For each detected peak the function checks if the peak is
538 | greater than its neighbors. If this is true, the peak is copied to the output list of detected objects. The
539 | neighboring peaks that are used for checking are taken from the list of CFAR detected objects, (not from the
540 | detection matrix), and copied into 3x3 kernel that has been initialized to zero for each peak under test. If the
541 | neighboring cell has not been detected by CFAR, its peak value is not copied into the kernel. Note: Function always
542 | search for 8 peaks in the list, but it only needs to search according to input flags.
543 |
544 | Args:
545 | obj_raw (np.ndarray): (num_detected_objects, 3). detected objects from CFAR.
546 | num_doppler_bins (int): number of doppler bins.
547 | max_range_idx (int): max range of detected objects.
548 | min_range_idx (int): min range of detected objects
549 | group_in_doppler_direction (int): flag to perform grouping along doppler direction.
550 | group_in_range_direction (int): flag to perform grouping along range direction.
551 |
552 | Returns:
553 | obj_out (np.ndarray): detected object after grouping.
554 |
555 | """
556 |
557 | num_detected_objects = obj_raw.shape[0]
558 |
559 | if (group_in_doppler_direction == 1) and (group_in_range_direction == 1):
560 | # Grouping both in Range and Doppler direction
561 | start_ind = 0
562 | step_ind = 1
563 | end_ind = 8
564 | elif (group_in_doppler_direction == 0) and (group_in_range_direction == 1):
565 | # Grouping only in Range direction
566 | start_ind = 1
567 | step_ind = 3
568 | end_ind = 7
569 | elif (group_in_doppler_direction == 1) and (group_in_range_direction == 0):
570 | # Grouping only in Doppler direction */
571 | start_ind = 3
572 | step_ind = 1
573 | end_ind = 5
574 | else:
575 | # No grouping, copy all detected objects to the output matrix within specified min max range
576 | num_detected_objects = min(num_detected_objects, MAX_OBJ_OUT)
577 | obj_out = obj_raw[(obj_raw['range_idx'][:num_detected_objects] <= max_range_idx) &
578 | (obj_raw['range_idx'][:num_detected_objects] > min_range_idx)]
579 |
580 | return obj_out
581 |
582 | # Start checking
583 | idx_obj_in_range = np.argwhere((obj_raw['range_idx'] <= max_range_idx) &
584 | (obj_raw['range_idx'] >= min_range_idx))[:, 0]
585 |
586 | obj_in_range = obj_raw[idx_obj_in_range]
587 | kernels = np.zeros((obj_in_range.shape[0], 9))
588 | detected_obj_flag = np.ones(obj_in_range.shape[0])
589 |
590 | # Populate the middle column.
591 | # Populate the 4th element.
592 | kernels[:, 4] = obj_in_range['peakVal']
593 |
594 | # Populate the 1st element.
595 | obj_in_range_previous = obj_raw[idx_obj_in_range - 1]
596 | assert obj_in_range_previous.shape == obj_in_range.shape, "obj_in_range_previous indexing is wrong"
597 | idx_temp = ((obj_in_range_previous['range_idx']) == (obj_in_range['range_idx'] - 1)) & \
598 | ((obj_in_range_previous['doppler_idx']) == (obj_in_range['doppler_idx']))
599 | kernels[idx_temp, 1] = obj_in_range_previous['peakVal'][idx_temp]
600 | # 0th detected object has no left neighbor.
601 | kernels[idx_obj_in_range[idx_obj_in_range[:] == 0], 1] = 0
602 |
603 | # Populate the 7th element.
604 | obj_in_range_next = obj_raw[(idx_obj_in_range + 1) % num_detected_objects]
605 | assert obj_in_range_next.shape == obj_in_range.shape, "obj_in_range_next indexing is wrong"
606 | idx_temp = ((obj_in_range_next['range_idx']) == (obj_in_range['range_idx'] + 1)) & \
607 | ((obj_in_range_next['doppler_idx']) == (obj_in_range['doppler_idx']))
608 | kernels[idx_temp, 7] = obj_in_range_next['peakVal'][idx_temp]
609 | # last detected object, i.e. num_detected_objects-th has no left neighbor.
610 | kernels[idx_obj_in_range[idx_obj_in_range[:] == num_detected_objects], 7] = 0
611 |
612 | for i, idxDeteced in enumerate(idx_obj_in_range):
613 | doppler_idx = obj_in_range['doppler_idx'][i]
614 | range_idx = obj_in_range['range_idx'][i]
615 | # Fill the left column
616 | k_left = WRAP_UP_LIST_IDX(idxDeteced - 1, num_detected_objects)
617 | k_right = WRAP_DN_LIST_IDX(idxDeteced + 1, num_detected_objects)
618 | for _ in range(num_detected_objects):
619 | k_left_doppler_idx = obj_raw['doppler_idx'][k_left]
620 | k_left_range_idx = obj_raw['range_idx'][k_left]
621 | k_left_peak_val = obj_raw['peakVal'][k_left]
622 | if k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 2, num_doppler_bins):
623 | break
624 | if k_left_range_idx == range_idx + 1 and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,
625 | num_doppler_bins):
626 | kernels[i, 6] = k_left_peak_val
627 | elif k_left_range_idx == range_idx and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,
628 | num_doppler_bins):
629 | kernels[i, 3] = k_left_peak_val
630 | elif k_left_range_idx == range_idx - 1 and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,
631 | num_doppler_bins):
632 | kernels[i, 0] = k_left_peak_val
633 | k_left = WRAP_UP_LIST_IDX(k_left - 1, num_detected_objects)
634 |
635 | k_right_doppler_idx = obj_raw['doppler_idx'][k_right]
636 | k_right_range_idx = obj_raw['range_idx'][k_right]
637 | k_right_peak_val = obj_raw['peakVal'][k_right]
638 | if k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 2, num_doppler_bins):
639 | break
640 | if k_right_range_idx == range_idx + 1 and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,
641 | num_doppler_bins):
642 | kernels[i, 8] = k_right_peak_val
643 | elif k_right_range_idx == range_idx and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,
644 | num_doppler_bins):
645 | kernels[i, 5] = k_right_peak_val
646 | elif k_right_range_idx == range_idx - 1 and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,
647 | num_doppler_bins):
648 | kernels[i, 2] = k_right_peak_val
649 | k_right = WRAP_DN_LIST_IDX(k_right + 1, num_detected_objects)
650 |
651 | detected_obj_flag[np.argwhere(np.max(kernels[:, start_ind:end_ind:step_ind]) != kernels[:, 4])] = 0
652 | obj_out = obj_in_range[detected_obj_flag[:] == 1]
653 |
654 | if obj_out.shape[0] > MAX_OBJ_OUT:
655 | obj_out = obj_out[:MAX_OBJ_OUT, ...]
656 |
657 | return obj_out
658 |
--------------------------------------------------------------------------------
/dsp/compensation.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import math
14 | from .utils import *
15 |
16 |
17 | def _generate_dft_sin_cos_table(dft_length):
18 | """Generate SIN/COS table for doppler compensation reference.
19 |
20 | Generate SIN/COS table. Also generates Sine/Cosine at half, one thrid and two thirds the bin value. This is a helper
21 | function only called by add_doppler_compensation().
22 |
23 | Args:
24 | dft_len: (int) dft_len Length of the DFT. It is used as numDopperBins, which is numCHirpsPerFrame/numTxAntenns.
25 |
26 | Returns:
27 | dft_sin_cos_table (np.ndarray): ndarray in complex format with generated sine (image) cosine (real) table.
28 | bins (np.ndarray): Sin/Cos at half, one thrid and two thirds the bin.
29 | """
30 | dft_sin_cos_table = np.arange(dft_length, dtype=np.float32)
31 | dft_sin_cos_table = np.cos(2 * np.pi * dft_sin_cos_table / dft_length) + \
32 | 1j * -np.sin(2 * np.pi * dft_sin_cos_table / dft_length)
33 |
34 | # 1/2, 1/3 and 2/3 bins
35 | bins = np.array([0.5, 1.0/3, 2.0/3])
36 | bins = np.cos(2 * np.pi * bins / dft_length) - 1j * np.sin(2 * np.pi * bins / dft_length)
37 |
38 | return dft_sin_cos_table, bins
39 |
def add_doppler_compensation(input_data,
                             num_tx_antennas,
                             doppler_indices=None,
                             num_doppler_bins=None):
    """Compensation of Doppler phase shift in the virtual antennas.

    Compensation of Doppler phase shift on the virtual antennas (corresponding to second or third Tx antenna chirps).
    Symbols corresponding to virtual antennas are rotated by half of the Doppler phase shift measured by Doppler FFT
    for a 2 Tx system and 1/3 and 2/3 of the Doppler phase shift for a 3 Tx system. The phase shift is read from the
    table using half or 1/3 of the object Doppler index value. If the Doppler index is odd, an extra half of the bin
    phase shift is added.

    The original function is called per detected object. This function is modified to directly compensate the
    azimuth_in matrix (numDetObj, num_angle_bins).

    Args:
        input_data (ndarray): (range, num_antennas, doppler) Radar data cube that needs to be compensated. It can be
            the input of azimuth FFT after CFAR or the intermediate right before beamforming.
        num_tx_antennas (int): Number of transmitters (1, 2 or 3 are supported).
        doppler_indices (ndarray): (Optional) Doppler index of the objects with the shape (num_detected_objects). If
            given, only the selected doppler bins are compensated.
        num_doppler_bins (int): Number of doppler bins in the radar data cube; required to build the rotation table.
            When given together with doppler_indices, the signed indices are converted to unsigned.

    Returns:
        input_data (ndarray): Original input data with the columns of the virtual receivers compensated.

    Raises:
        ValueError: If num_tx_antennas is larger than 3.

    Example:
        >>> # If the compensation is done right before naive azimuth FFT and objects are detected already, you need
        >>> # to feed in the doppler_indices
        >>> dataIn = add_doppler_compensation(dataIn, 3, doppler_indices, 128)
    """
    num_antennas = input_data.shape[1]
    if num_tx_antennas == 1:
        # Single Tx: all chirps share the same transmitter, nothing to rotate.
        return input_data
    elif num_tx_antennas > 3:
        raise ValueError("the specified number of transimitters is currently not supported")

    # Generate the rotation table and the 1/2, 1/3, 2/3 bin rotations.
    azimuth_mod_coefs, bins = _generate_dft_sin_cos_table(int(num_doppler_bins))

    if doppler_indices is not None:
        if num_doppler_bins is not None:
            # Convert signed doppler indices to unsigned and divide the Doppler index by 2.
            doppler_compensation_indices = doppler_indices & (num_doppler_bins - 1)
            doppler_compensation_indices[doppler_compensation_indices[:] >= (num_doppler_bins / 2)] -= num_doppler_bins
            doppler_compensation_indices = doppler_compensation_indices // 2
            doppler_compensation_indices[doppler_compensation_indices[:] < 0] += num_doppler_bins
            exp_doppler_compensation = azimuth_mod_coefs[doppler_compensation_indices]
        else:
            exp_doppler_compensation = azimuth_mod_coefs

        # Add half (or third) bin rotation if the Doppler index is not a multiple of num_tx_antennas.
        if num_tx_antennas == 2:
            exp_doppler_compensation[(doppler_indices[:] % 2) == 1] *= bins[0]
        else:
            exp_doppler_compensation[(doppler_indices[:] % 3) == 1] *= bins[1]
            exp_doppler_compensation[(doppler_indices[:] % 3) == 2] *= bins[2]
    else:
        # BUG FIX: compensate every doppler bin when no indices are supplied.
        # Previously this path could leave exp_doppler_compensation unassigned,
        # raising NameError at the expand_dims call below.
        exp_doppler_compensation = azimuth_mod_coefs

    # Expand the dim so that the broadcasting below will work.
    exp_doppler_compensation = np.expand_dims(exp_doppler_compensation, axis=1)

    # Rotate the symbols of the virtual receivers (the antennas following the
    # physical rx of the first transmitter).
    # BUG FIX: use integer (floor) division -- a float slice index raises
    # TypeError on Python 3.
    first_virtual_rx = num_antennas // num_tx_antennas
    azimuth_values = input_data[:, first_virtual_rx:, :]
    for azi_val in azimuth_values:
        Re = exp_doppler_compensation.real * azi_val.imag - exp_doppler_compensation.imag * azi_val.real
        Im = exp_doppler_compensation.imag * azi_val.imag + exp_doppler_compensation.real * azi_val.real
        # NOTE(review): this writes the full virtual-rx slice on every loop
        # iteration (matching the upstream implementation) -- confirm whether
        # a per-range-row update was intended.
        input_data[:, first_virtual_rx:, :] = Re + 1j * Im

    return input_data
110 |
111 |
def rx_channel_phase_bias_compensation(rx_channel_compensations, input, num_antennas):
    """Compensation of rx channel phase bias.

    Applies the per-channel compensation coefficients to the first
    num_antennas symbols of `input`, writing the result back in place.

    Args:
        rx_channel_compensations: rx channel compensation coefficients.
        input: complex symbol array, modified in place.
        num_antennas: number of symbols to compensate.
    """
    symbols = input[:num_antennas]
    coefficients = rx_channel_compensations[:num_antennas]

    real_part = coefficients * (symbols.imag - symbols.real)
    imag_part = coefficients * (symbols.imag + symbols.real)
    input[:num_antennas] = real_part + 1j * imag_part

    return
128 |
129 |
def near_field_correction(idx,
                          detected_objects,
                          start_range_index,
                          end_range_index,
                          azimuth_input,
                          azimuth_output,
                          num_angle_bins,
                          num_rx_antennas,
                          range_resolution):
    """Correct phase error as the far-field plane wave assumption breaks.

    Calculates near field correction for the input detected index (corresponding
    to a range position). Referring to top level doxygen @ref
    nearFieldImplementation, this function performs the Set 1 rotation with the
    correction and adds to Set 0 in place to produce the result in Set 0 of the
    azimuth_output.

    This correction is done per detected object from CFAR detection.

    Args:
        idx: index of the detected object in detected_objects.
        detected_objects: structured array of at most 100 objects with fields
            rangeIdx, dopplerIdx, peakVal, x, y and z.
        start_range_index: first range index (inclusive) of near field correction.
        end_range_index: range index (exclusive) at which near field correction stops.
        azimuth_input: complex array of length num_angle_bins + numVirtualAntAzim,
            where numVirtualAntAzim = 4, 8 or 12 depending on how many Txs are used.
        azimuth_output: complex array of length 2 * num_angle_bins; corrected
            result written into the lower half in place.
        num_angle_bins: number of azimuth FFT bins.
        num_rx_antennas: number of physical receive antennas.
        range_resolution: range resolution in meters per range bin.

    Returns:
        None. azimuth_output is changed in-place.
    """

    LAMBDA_77GHz_MILLIMETER = 3e8 / 77e9
    # NOTE(review): 3e8 / 77e9 is the 77 GHz wavelength in *meters* (~3.9e-3),
    # yet it is mixed below with the 8.7 mm measurement. Kept as-is to preserve
    # upstream behavior -- verify the intended units.
    MMWDEMO_TWO_PI_OVER_LAMBDA = 2.0 * math.pi / LAMBDA_77GHz_MILLIMETER

    # Sanity check and check if nearFieldCorrection is necessary.
    assert idx >= 0 and idx < MAX_OBJ_OUT, "idx is out of bound!"
    rangeIdx = detected_objects['rangeIdx'][idx]
    if rangeIdx < start_range_index or rangeIdx >= end_range_index:
        print("{} is out of the nearFieldCorrection range".format(rangeIdx))
        return

    # Zero the FFT input and pack the second set of rx symbols right behind
    # the first num_rx_antennas entries. (num_angle_bins is typically 64.)
    azimuth_input[:num_angle_bins] = 0
    azimuth_input[num_rx_antennas: num_rx_antennas + num_rx_antennas] = azimuth_input[num_angle_bins:]

    # azimuth_output has length of 2*num_angle_bins; the upper half holds Set 1.
    azimuth_output[num_angle_bins:] = np.fft.fft(azimuth_input, n=num_angle_bins)

    # Antenna geometry reference points, mirroring the original C macros:
    #   MMWDEMO_NEAR_FIELD_A (0)
    #   MMWDEMO_NEAR_FIELD_B (LAMBDA_77GHz_MILLIMETER)
    #     B can be changed to position the desired reference (boresight) in the geometry
    #   MMWDEMO_NEAR_FIELD_C (2 * LAMBDA_77GHz_MILLIMETER)
    #   MMWDEMO_NEAR_FIELD_D (C + 8.7)
    #     8.7 mm is the actual (approximate) measured distance between tx1 and rx4
    #   MMWDEMO_NEAR_FIELD_E (D + 1.5 * LAMBDA_77GHz_MILLIMETER)
    geometry_points = {"A": 0,
                       "B": LAMBDA_77GHz_MILLIMETER,
                       "C": 2 * LAMBDA_77GHz_MILLIMETER,
                       "D": 2 * LAMBDA_77GHz_MILLIMETER + 8.7,
                       "E": (2 + 1.5) * LAMBDA_77GHz_MILLIMETER + 8.7}

    # AB, CB, DB, EB
    geometry_lines = np.array([geometry_points["A"] - geometry_points["B"],
                               geometry_points["C"] - geometry_points["B"],
                               geometry_points["D"] - geometry_points["B"],
                               geometry_points["E"] - geometry_points["B"]])

    geometry_lines_square = geometry_lines * geometry_lines

    range_in_millimeter = (detected_objects['rangeIdx'][idx] * range_resolution - range_resolution) * 1000
    range_squared = range_in_millimeter * range_in_millimeter
    theta_incrementation = 2.0 / num_angle_bins

    for i in range(num_angle_bins):
        # theta sweeps sin-space; upper bins map to negative angles.
        theta = i * theta_incrementation if i < num_angle_bins / 2 else (i - num_angle_bins) * theta_incrementation

        # Law-of-cosines distance from each antenna reference point to the target.
        tx1 = np.sqrt(range_squared + geometry_lines_square[1] - range_in_millimeter * theta * geometry_lines[1] * 2)
        rx4 = np.sqrt(range_squared + geometry_lines_square[2] - range_in_millimeter * theta * geometry_lines[2] * 2)
        tx2 = np.sqrt(range_squared + geometry_lines_square[0] - range_in_millimeter * theta * geometry_lines[0] * 2)
        rx1 = np.sqrt(range_squared + geometry_lines_square[3] - range_in_millimeter * theta * geometry_lines[3] * 2)

        # BUG FIX: the original tested `range > 0`, comparing the *builtin*
        # `range` type against an int (TypeError on Python 3). The intended
        # guard is on the object's range.
        if range_in_millimeter > 0:
            psi = MMWDEMO_TWO_PI_OVER_LAMBDA * ((tx2 + rx1) - (rx4 + tx1)) - np.pi * theta
            corrReal = np.cos(psi)
            corrImag = np.sin(-psi)

            out1CorrReal = azimuth_output[num_angle_bins + i].real * corrReal + \
                           azimuth_output[num_angle_bins + i].imag * corrImag
            out1CorrImag = azimuth_output[num_angle_bins + i].imag * corrReal + \
                           azimuth_output[num_angle_bins + i].real * corrImag

            azimuth_output[i] = (azimuth_output[i].real + out1CorrReal) + \
                                (azimuth_output[i].imag + out1CorrImag) * 1j

    return
227 |
# Remove static targets in the range bins between positive_bin_idx and negative_bin_idx
def dc_range_signature_removal(fft_out1_d,
                               positive_bin_idx,
                               negative_bin_idx,
                               calib_dc_range_sig_cfg,
                               num_tx_antennas,
                               num_chirps_per_frame):
    """Compensation of DC range antenna signature.

    Antenna coupling signature dominates the range bins close to the radar. These are the bins in the range FFT output
    located around DC. This feature is under user control in terms of enable/disable and start/end range bins through a
    CLI command called calibDcRangeSig. During measurement (when the CLI command is issued with feature enabled), each
    of the specified range bins for each of the virtual antennas are accumulated over the specified number of chirps
    and at the end of the period, the average is computed for each bin/antenna combination for removal after the
    measurement period is over. Note that the number of chirps to average must be power of 2. It is assumed that no
    objects are present in the vicinity of the radar during this measurement period. After measurement is done, the
    removal starts for all subsequent frames during which each of the bin/antenna average estimate is subtracted from
    the corresponding received samples in real-time for subsequent processing.

    This function has a measurement phase while calib_dc_range_sig_cfg.counter is less than the preferred value and
    a calibration (removal) phase afterwards. The original function is performed per chirp. Here it is modified to be
    called per frame.

    Args:
        fft_out1_d: (num_chirps_per_frame, num_rx_antennas, numRangeBins). Output of 1D FFT.
        positive_bin_idx: the first positive_bin_idx range bins (inclusive) to be compensated.
        negative_bin_idx: the last -negative_bin_idx range bins to be compensated.
        calib_dc_range_sig_cfg: a simple class for calibration configuration's storing purpose; must expose
            counter (int), num_frames (int) and mean (ndarray of shape
            (2, positive_bin_idx + 1 - negative_bin_idx)).
        num_tx_antennas: number of transmitters.
        num_chirps_per_frame: number of total chirps per frame.

    Returns:
        None. fft_out1_d is modified in-place.
    """
    if not calib_dc_range_sig_cfg.counter:
        calib_dc_range_sig_cfg.mean.fill(0)

    # Measurement phase: accumulate the selected range bins over the
    # configured number of frames (per even/odd chirp set, i.e. per tx).
    if calib_dc_range_sig_cfg.counter < calib_dc_range_sig_cfg.num_frames * num_tx_antennas:
        # BUG FIX: accumulate with += — the original overwrote the running sum
        # each frame, so only the last frame contributed before the division
        # by num_frames * num_chirps_per_frame below.
        calib_dc_range_sig_cfg.mean[0, :positive_bin_idx + 1] += np.sum(
            fft_out1_d[0::2, :, :positive_bin_idx + 1],
            axis=(0, 1))
        calib_dc_range_sig_cfg.mean[0, positive_bin_idx + 1:] += np.sum(
            fft_out1_d[0::2, :, negative_bin_idx:],
            axis=(0, 1))

        calib_dc_range_sig_cfg.mean[1, :positive_bin_idx + 1] += np.sum(
            fft_out1_d[1::2, :, :positive_bin_idx + 1],
            axis=(0, 1))
        calib_dc_range_sig_cfg.mean[1, positive_bin_idx + 1:] += np.sum(
            fft_out1_d[1::2, :, negative_bin_idx:],
            axis=(0, 1))

        calib_dc_range_sig_cfg.counter += 1

        if calib_dc_range_sig_cfg.counter == (calib_dc_range_sig_cfg.num_frames * num_tx_antennas):
            # Measurement finished: convert the accumulated sums to averages.
            num_avg_chirps = calib_dc_range_sig_cfg.num_frames * num_chirps_per_frame
            calib_dc_range_sig_cfg.mean /= num_avg_chirps

    else:
        # Removal phase: subtract the stored averages from every chirp.
        # BUG FIX: the negative bins must be subtracted from the
        # fft_out1_d[..., negative_bin_idx:] slice (which is what was
        # accumulated above); the original used positive_bin_idx + 1:,
        # mismatching the mean's shape.
        fft_out1_d[0::2, :, :positive_bin_idx + 1] -= calib_dc_range_sig_cfg.mean[0, :positive_bin_idx + 1]
        fft_out1_d[0::2, :, negative_bin_idx:] -= calib_dc_range_sig_cfg.mean[0, positive_bin_idx + 1:]
        fft_out1_d[1::2, :, :positive_bin_idx + 1] -= calib_dc_range_sig_cfg.mean[1, :positive_bin_idx + 1]
        fft_out1_d[1::2, :, negative_bin_idx:] -= calib_dc_range_sig_cfg.mean[1, positive_bin_idx + 1:]
292 |
293 |
def clutter_removal(input_val, axis=0):
    """Perform basic static clutter removal by removing the mean from the input_val on the specified doppler axis.

    Args:
        input_val (ndarray): Array to perform static clutter removal on. Usually applied before performing doppler FFT.
            e.g. [num_chirps, num_vx_antennas, num_samples], it is applied along the first axis.
        axis (int): Axis along which the mean (the static clutter estimate) is computed and subtracted.

    Returns:
        ndarray: Array with static clutter removed; same shape as input_val.

    """
    # Swap `axis` with axis 0 so the mean can be taken along the first axis.
    reordering = np.arange(len(input_val.shape))
    reordering[0] = axis
    reordering[axis] = 0
    transposed = input_val.transpose(reordering)

    # BUG FIX: take the mean of the already-reordered array. The original
    # applied the (self-inverse) permutation a second time before .mean(0),
    # which averaged along the wrong axis and broke broadcasting whenever
    # axis != 0.
    mean = transposed.mean(0)
    output_val = transposed - mean

    # The swap permutation is its own inverse, so this restores the layout.
    return output_val.transpose(reordering)
317 |
--------------------------------------------------------------------------------
/dsp/doppler_processing.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
14 | from . import compensation
15 | from . import utils
16 |
17 |
def doppler_resolution(band_width, start_freq_const=77, ramp_end_time=62, idle_time_const=100, num_loops_per_frame=128,
                       num_tx_antennas=3):
    """Calculate the doppler resolution for the given radar configuration.

    Args:
        band_width (float): Radar config bandwidth in Hz.
        start_freq_const (int): Frequency chirp starting point in GHz.
        ramp_end_time (int): Frequency chirp end point in us.
        idle_time_const (int): Idle time between chirps in us.
        num_loops_per_frame (int): The number of loops in each frame.
        num_tx_antennas (int): The number of transmitting antennas (tx) on the radar.

    Returns:
        doppler_resolution (float): The doppler resolution for the given radar configuration.

    """
    light_speed_meter_per_sec = 299792458

    # Velocity resolution follows from the total frame observation time at
    # the chirp center frequency.
    center_frequency = start_freq_const * 1e9 + band_width / 2
    chirp_interval = (ramp_end_time + idle_time_const) * 1e-6

    return light_speed_meter_per_sec / (
        2 * num_loops_per_frame * num_tx_antennas * center_frequency * chirp_interval)
43 |
44 |
def separate_tx(signal, num_tx, vx_axis=1, axis=0):
    """Separate interleaved radar data from separate TX along a certain axis to account for TDM radars.

    Args:
        signal (ndarray): Received signal.
        num_tx (int): Number of transmit antennas.
        vx_axis (int): Axis in which to accumulate the separated data.
        axis (int): Axis in which the data is interleaved.

    Returns:
        ndarray: Separated received data.

    """
    # Swap the interleaved axis to the front.
    perm = np.arange(signal.ndim)
    perm[0], perm[axis] = axis, 0
    front = signal.transpose(perm)

    # Chirps i, i+num_tx, i+2*num_tx, ... belong to tx i; stack each tx's
    # chirps along the virtual-antenna axis.
    per_tx = [front[tx::num_tx, ...] for tx in range(num_tx)]
    combined = np.concatenate(per_tx, axis=vx_axis)

    # The swap permutation is its own inverse, so this restores the layout.
    return combined.transpose(perm)
67 |
68 |
def doppler_processing(radar_cube,
                       num_tx_antennas=2,
                       clutter_removal_enabled=False,
                       interleaved=False,
                       window_type_2d=None,
                       accumulate=True):
    """Perform 2D FFT on the radar_cube.

    De-interleaves the radar_cube if needed, applies optional clutter removal
    and windowing, then runs the doppler FFT. In contrast to the original TI
    code, CFAR and peak grouping are intentionally kept separate from the 2D
    FFT for easier debugging.

    Args:
        radar_cube (ndarray): Output of the 1D FFT. If not interleaved beforehand, it has the shape of
            (numChirpsPerFrame, numRxAntennas, numRangeBins). Otherwise, it has the shape of
            (numRangeBins, numVirtualAntennas, num_doppler_bins). It is assumed that after interleaving the doppler
            dimension is located at the last axis.
        num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.
        clutter_removal_enabled (boolean): Flag to enable naive clutter removal.
        interleaved (boolean): Whether radar_cube was interleaved before passing in; if not, it is de-interleaved
            here via separate_tx (only applicable to TDM radars, where each tx emits its chirps sequentially).
        window_type_2d (mmwave.dsp.utils.Window): Optional windowing type before doppler FFT.
        accumulate (boolean): Flag to reduce the numVirtualAntennas dimension.

    Returns:
        detMatrix (ndarray): (numRangeBins, num_doppler_bins) range-doppler map when accumulate is True (the
            zero-doppler index can be used as azimuthStaticHeatMap for visualization); otherwise the raw complex
            doppler FFT output.
        aoa_input (ndarray): (numRangeBins, numVirtualAntennas, num_doppler_bins) doppler FFT output organized by
            virtual rx.
    """
    # De-interleave TDM chirps into virtual antennas if not done already
    # (for 2 tx and 0-based indexing, even chirps are tx1, odd are tx2).
    if interleaved:
        doppler_input = separate_tx(radar_cube, num_tx_antennas, vx_axis=1, axis=0)
    else:
        doppler_input = radar_cube

    # (Optional) naive static clutter removal along the chirp axis.
    if clutter_removal_enabled:
        doppler_input = compensation.clutter_removal(doppler_input, axis=0)

    # Rearrange to (numRangeBins, numVirtualAntennas, num_doppler_bins); the
    # doppler FFT below runs along the last axis.
    doppler_input = np.transpose(doppler_input, axes=(2, 1, 0))

    # (Optional) windowing along the doppler axis before the FFT.
    if window_type_2d:
        doppler_input = utils.windowing(doppler_input, window_type_2d, axis=2)

    aoa_input = np.fft.fft(doppler_input)

    if accumulate:
        # Detection matrix: log2-magnitude summed across virtual antennas.
        det_matrix = np.sum(np.log2(np.abs(aoa_input)), axis=1)
        return det_matrix, aoa_input

    # Without accumulation, hand back the raw complex doppler FFT.
    return aoa_input, aoa_input
140 |
141 |
def doppler_estimation(radar_cube,
                       beam_weights,
                       num_tx_antennas=2,
                       clutter_removal_enabled=False,
                       interleaved=False,
                       window_type_2d=None):
    """Perform doppler estimation on the weighted sum of range FFT output across all virtual antennas.

    In contrast to directly computing doppler FFT from the output of range FFT, this function combines it across all
    the virtual receivers first using the weights generated from beamforming. Then FFT is performed and argmax is
    taken across each doppler axis to return the indices of max doppler values.

    Args:
        radar_cube (ndarray): Output of the 1D FFT with only ranges on detected objects. If not interleaved
            beforehand, it has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). Otherwise, it has the
            shape of (numDetObjs, numVirtualAntennas, num_doppler_bins). It is assumed that after interleaving the
            doppler dimension is located at the last axis.
        beam_weights (ndarray): Weights to sum up the radar_cube across the virtual receivers. It is from the
            beam-forming and has the shape of (numVirtualAntennas, numDetObjs).
        num_tx_antennas (int): Number of transmitter antennas (1, 2 or 3). Affects how interleaving is performed.
        clutter_removal_enabled (boolean): Flag to enable naive clutter removal.
        interleaved (boolean): If the input radar_cube is interleaved before passing in (only applicable to TDM
            radar, i.e. each tx emits its chirps sequentially).
        window_type_2d (string): Optional windowing type before doppler FFT.

    Returns:
        doppler_est (ndarray): (numDetObjs) Doppler index for each detected object. Positive index means moving away
            from the radar while negative index means moving towards the radar.

    Raises:
        ValueError: If num_tx_antennas is not 1, 2 or 3 when de-interleaving is required.
    """
    if not interleaved:
        # BUG FIX: use integer (floor) division — the result feeds integer
        # index arithmetic on doppler_est below; `/` yields a float and makes
        # the in-place subtraction fail on the int argmax array.
        num_doppler_bins = radar_cube.shape[0] // num_tx_antennas
        # radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, even chirps are from tx1
        # and odd from tx2), so de-interleaving yields (num_doppler_bins, numVirtualAntennas, numDetObjs), where
        # numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.
        if num_tx_antennas == 1:
            fft2d_in = radar_cube
        elif num_tx_antennas == 2:
            fft2d_in = np.concatenate((radar_cube[0::2, ...], radar_cube[1::2, ...]), axis=1)
        elif num_tx_antennas == 3:
            fft2d_in = np.concatenate((radar_cube[0::3, ...], radar_cube[1::3, ...], radar_cube[2::3, ...]), axis=1)
        else:
            raise ValueError("num_tx_antennas must be 1, 2 or 3")

        # transpose to (numDetObjs, numVirtualAntennas, num_doppler_bins)
        fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))
    else:
        num_doppler_bins = radar_cube.shape[2]
        # BUG FIX: the original never assigned fft2d_in on this branch,
        # leaving it None and crashing the einsum below.
        fft2d_in = radar_cube

    # (Optional) Static Clutter Removal
    if clutter_removal_enabled:
        fft2d_in = compensation.clutter_removal(fft2d_in)

    # Weighted sum across all virtual receivers.
    fft2d_in = np.einsum('ijk,jk->ik', fft2d_in, beam_weights)

    # (Optional) windowing along the doppler axis before the FFT.
    if window_type_2d:
        fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=1)

    # Doppler FFT along the last axis, then pick the strongest doppler bin
    # per detected object and convert to a signed index.
    doppler_est = np.fft.fft(fft2d_in)
    doppler_est = np.argmax(doppler_est, axis=1)
    doppler_est[doppler_est[:] >= num_doppler_bins] -= num_doppler_bins * 2

    return doppler_est
208 |
--------------------------------------------------------------------------------
/dsp/music.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import numpy.linalg as LA
3 | from .angle_estimation import cov_matrix
4 |
5 | def _noise_subspace(covariance, num_sources):
6 | """helper function to get noise_subspace.
7 | """
8 | if covariance.ndim != 2 or covariance.shape[0] != covariance.shape[1]:
9 | raise ValueError("covariance matrix should be a 2D square matrix.")
10 | if num_sources >= covariance.shape[0]:
11 | raise ValueError("number of sources should be less than number of receivers.")
12 | _, v = LA.eigh(covariance)
13 |
14 | return v[:, :-num_sources]
15 |
def aoa_music_1D(steering_vec, rx_chirps, num_sources):
    """1D MUltiple SIgnal Classification (MUSIC) pseudo-spectrum on a ULA (Uniform Linear Array).

    Current implementation assumes the covariance matrix is not rank deficient and the ULA spacing is half of the
    wavelength.
    .. math::
        P_{} (\\theta) = \\frac{1}{a^{H}(\\theta) \mathbf{E}_\mathrm{n}\mathbf{E}_\mathrm{n}^H a(\\theta)}
    where :math:`E_{n}` is the noise subspace and :math:`a` is the steering vector.

    Args:
        steering_vec (~np.ndarray): steering vector with the shape of (FoV/angle_resolution, num_ant), usually
            181 rows; generated by gen_steering_vec().
        rx_chirps (~np.ndarray): Output of the 1D range FFT with shape (num_ant, num_chirps_per_frame).
        num_sources (int): Number of sources in the scene. Needs to be smaller than num_ant for ULA.

    Returns:
        (~np.ndarray): the MUSIC pseudo-spectrum; sources appear as sharp peaks.
    """
    num_antennas = rx_chirps.shape[0]
    assert num_antennas == steering_vec.shape[1], "Mismatch between number of receivers in "
    if num_antennas < num_sources:
        raise ValueError("number of sources shoule not exceed number ")

    covariance = cov_matrix(rx_chirps)
    noise = _noise_subspace(covariance, num_sources)
    # Project every steering vector onto the noise subspace; source angles
    # are (nearly) orthogonal to it, so the reciprocal power peaks there.
    projections = noise.conj().T @ steering_vec.T
    denominator = (projections * projections.conj()).sum(axis=0).real

    return np.reciprocal(denominator)
45 |
def aoa_root_music_1D(steering_vec, rx_chirps, num_sources):
    """Implementation of the 1D root-MUSIC algorithm on a ULA (Uniform Linear Array).

    Root MUSIC follows the same equation as the original MUSIC, but finds the sources by rooting the noise-subspace
    polynomial instead of scanning the steering vectors.
    This implementation referred to github.com/morriswmz/doatools.py

    Args:
        steering_vec (~np.ndarray): steering vector with the shape of (FoV/angle_resolution, num_ant), usually
            181 rows; generated by gen_steering_vec(). Only used for the receiver-count sanity check.
        rx_chirps (~np.ndarray): Output of the 1D range FFT with shape (num_ant, num_chirps_per_frame).
        num_sources (int): Number of sources in the scene. Needs to be smaller than num_ant for ULA.

    Returns:
        (~np.ndarray): estimated angles of arrival in degrees, or None if fewer than num_sources roots lie inside
            the unit circle.
    """
    num_antennas = rx_chirps.shape[0]
    assert num_antennas == steering_vec.shape[1], "Mismatch between number of receivers in "
    if num_antennas < num_sources:
        raise ValueError("number of sources shoule not exceed number ")

    R = cov_matrix(rx_chirps)
    noise_subspace = _noise_subspace(R, num_sources)
    v = noise_subspace @ noise_subspace.T.conj()

    # Build the root-MUSIC polynomial coefficients from the diagonal sums of
    # the noise projection matrix.
    coeffs = np.zeros(num_antennas - 1, dtype=np.complex64)
    for i in range(1, num_antennas):
        coeffs[i - 1] += np.sum(np.diag(v, i))
    coeffs = np.hstack((coeffs[::-1], np.sum(np.diag(v)), coeffs.conj()))

    roots = np.roots(coeffs)
    # BUG FIX: keep the *complex* roots inside (or on) the unit circle. The
    # original took np.abs() first, which discarded the phase that encodes the
    # angle of arrival (np.angle of a magnitude is always 0), and compared
    # complex numbers with <=, which raises TypeError on modern NumPy.
    roots = roots[np.abs(roots) <= 1.0]
    if len(roots) < num_sources:
        return None
    # The signal roots are the num_sources roots closest to the unit circle.
    roots = roots[np.argsort(np.abs(roots))][-num_sources:]

    # Assume half-wavelength spacing: root phase equals pi * sin(theta).
    sin_vals = np.angle(roots) / np.pi
    locations = np.rad2deg(np.arcsin(sin_vals))

    return locations
87 |
def aoa_spatial_smoothing(covariance_matrix, num_subarrays, forward_backward=False):
    """Decorrelate coherent sources by spatially smoothing a covariance matrix.

    Averages `num_subarrays` shifted principal submatrices of the covariance
    (forward smoothing); optionally also averages with the doubly-flipped
    conjugate matrix (forward-backward smoothing).
    This implementations referred to the github.com/morriswmz/doatools.py

    Args:
        covariance_matrix (~np.ndarray): Covariance matrix of input signal.
        num_subarrays (int): Number of subarrays to perform the spatial smoothing.
        forward_backward (bool): If True, perform backward smoothing as well.

    Returns:
        (~np.ndarray): Decorrelated covariance matrix of size
            (num_receivers - num_subarrays + 1) per side.
    """
    num_receivers = covariance_matrix.shape[0]
    assert 1 <= num_subarrays <= num_receivers, "num_subarrays is wrong"

    sub_size = num_receivers - num_subarrays + 1

    # Forward pass: average the shifted sub_size x sub_size diagonal blocks.
    smoothed = covariance_matrix[:sub_size, :sub_size].copy()
    for shift in range(1, num_subarrays):
        smoothed += covariance_matrix[shift:shift + sub_size, shift:shift + sub_size]
    smoothed /= num_subarrays

    if not forward_backward:
        return smoothed

    # Backward pass: average with the matrix flipped along both axes
    # (conjugated when complex-valued).
    reflected = np.flip(smoothed)
    if np.iscomplexobj(smoothed):
        reflected = reflected.conj()
    return 0.5 * (smoothed + reflected)
118 |
def aoa_esprit(steering_vec, rx_chirps, num_sources, displacement):
    """ Perform Estimation of Signal Parameters via Rotation Invariance Techniques (ESPRIT) for Angle of Arrival.

    ESPRIT exploits the rotational invariance between two identical subarrays
    shifted by `displacement` elements: the eigenvalue phases of the rotation
    between their signal subspaces encode the arrival angles.

    Args:
        steering_vec (~np.ndarray): steering vector with the shape of (FoV/angle_resolution, num_ant).
            Generated from gen_steering_vec(). NOTE(review): not actually used in this body.
        rx_chirps (~np.ndarray): Output of the 1D range FFT. The shape is (num_ant, num_chirps_per_frame).
        num_sources (int): Number of sources in the scene. Needs to be smaller than num_ant for ULA.
        displacement (int): displacement between the two subarrays, in antenna elements.

    Returns:
        (~np.ndarray): estimated angles of arrival in degrees.
    """
    num_antennas = rx_chirps.shape[0]
    if displacement > num_antennas/2 or displacement <= 0:
        raise ValueError("The separation between two subarrays can only range from 1 to half of the original array size.")

    # Two overlapping subarrays offset by `displacement` antennas.
    subarray1 = rx_chirps[:num_antennas - displacement]
    subarray2 = rx_chirps[displacement:]
    assert subarray1.shape == subarray2.shape, "separating subarrays encounters error."

    # Per-subarray covariance; eigh returns eigenvalues in ascending order, so
    # the signal subspace is the last num_sources eigenvector columns.
    R1 = cov_matrix(subarray1)
    R2 = cov_matrix(subarray2)
    _, v1 = LA.eigh(R1)
    _, v2 = LA.eigh(R2)

    E1 = v1[:, -num_sources:]
    E2 = v2[:, -num_sources:]
    # Stack the two signal subspaces and eigendecompose the combined matrix
    # (TLS-ESPRIT style). C is (2*num_sources) x (2*num_sources).
    C = np.concatenate((E1.T.conj(), E2.T.conj()), axis=0) @ np.concatenate((E1, E2), axis=1)
    _, Ec = LA.eigh(C)
    # NOTE(review): this flips ROWS to reorder by descending eigenvalue, and the
    # blocks below are sliced with num_antennas although C is 2*num_sources
    # square — confirm against a reference TLS-ESPRIT implementation.
    Ec = Ec[::-1, :]

    phi = -Ec[:num_antennas, num_antennas:] @ LA.inv(Ec[num_antennas:, num_antennas:])
    w, _ = LA.eig(phi)

    # Assume half-wavelength spacing: angle(w) = pi * sin(theta).
    sin_vals = np.angle(w) / np.pi
    locations = np.rad2deg(np.arcsin(sin_vals))

    return locations
--------------------------------------------------------------------------------
/dsp/noise_removal.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
14 |
15 |
def peak_grouping_along_doppler(det_obj_2d,
                                det_matrix,
                                num_doppler_bins):
    """Keep only detections that are local maxima along the doppler direction.

    A detection survives when its peak value is strictly greater than the
    detection-matrix values of both doppler neighbors. The wrap is circular:
    the right neighbor uses modulo, the left neighbor relies on numpy negative
    indexing (index -1 selects the last doppler bin).
    This is a temporary remedy for the slow and old implementation of
    peak_grouping_qualified() residing in dsp.py; will be merged back there.
    """
    rng_bins = det_obj_2d['rangeIdx']
    dop_bins = det_obj_2d['dopplerIdx']

    left = det_matrix[rng_bins, dop_bins - 1].astype(np.float32)
    right = det_matrix[rng_bins, (dop_bins + 1) % num_doppler_bins].astype(np.float32)
    center = det_obj_2d['peakVal'].astype(np.float32)

    is_local_max = (center > left) & (center > right)
    return det_obj_2d[is_local_max]
32 |
33 |
def range_based_pruning(det_obj_2d_raw,
                        snr_thresh,
                        peak_val_thresh,
                        max_range,
                        min_range,
                        range_resolution):
    """Drop detections that are out of range or fail SNR/peakVal requirements.

    A detection is removed when either:
    1. Its range index is not within [min_range, max_range].
    2. It fails the range-dependent SNR/peakVal requirement: closer targets
       must clear a higher bar than farther ones. `snr_thresh` rows are
       (range-limit-in-meters, minimum-SNR); `peak_val_thresh` holds one
       (range-limit-in-meters, minimum-peakVal) row applied at close range.
    """
    in_range = (det_obj_2d_raw['rangeIdx'] >= min_range) & \
               (det_obj_2d_raw['rangeIdx'] <= max_range)
    objs = det_obj_2d_raw[in_range]

    range_m = objs['rangeIdx'] * range_resolution
    snr = objs['SNR']

    # Three range bands, each with its own minimum SNR.
    near_ok = (snr > snr_thresh[0, 1]) & (range_m < snr_thresh[0, 0])
    mid_ok = (snr > snr_thresh[1, 1]) & \
             (range_m < snr_thresh[1, 0]) & \
             (range_m >= snr_thresh[0, 0])
    far_ok = (snr > snr_thresh[2, 1]) & (range_m > snr_thresh[1, 0])
    snr_ok = near_ok | mid_ok | far_ok

    # Reject only detections that are both close AND below the peakVal floor.
    peak_ok = np.logical_not((objs['peakVal'] < peak_val_thresh[0, 1]) & \
                             (range_m < peak_val_thresh[0, 0]))

    return objs[snr_ok & peak_ok]
61 |
62 |
def prune_to_peaks(det_obj2_d_raw,
                   det_matrix,
                   num_doppler_bins,
                   reserve_neighbor=False):
    """Reduce the CFAR detected output to local peaks along doppler.

    By default a detection is kept only when its detection-matrix value is
    strictly greater than both circular doppler neighbors. With
    `reserve_neighbor` toggled, a value that beats its second neighbor on one
    side and its immediate neighbor on the other is also kept (the "larger
    neighbor" of a peak). E.g. for [2, 1, 5, 3, 2] the default keeps the
    peaks only, while reserve_neighbor additionally keeps the 3 next to 5.

    Args:
        det_obj2_d_raw (np.ndarray): The detected objects structured array which contains the range_idx,
            doppler_idx, peakVal and SNR, etc.
        det_matrix (np.ndarray): Output of doppler FFT with virtual antenna dimensions reduced. It has the
            shape of (num_range_bins, num_doppler_bins).
        num_doppler_bins (int): Number of doppler bins.
        reserve_neighbor (boolean): if toggled, will return both peaks and the larger neighbors.

    Returns:
        np.ndarray: pruned version of det_obj2_d_raw.
    """
    rng = det_obj2_d_raw['rangeIdx']
    dop = det_obj2_d_raw['dopplerIdx']

    def _wrap(idx):
        # Circular doppler index in [0, num_doppler_bins).
        return np.mod(idx, num_doppler_bins)

    prev_idx = _wrap(dop - 1)
    next_idx = _wrap(dop + 1)

    val_prev = det_matrix[rng, prev_idx]
    val_here = det_matrix[rng, dop]
    val_next = det_matrix[rng, next_idx]

    keep = (val_here > val_prev) & (val_here > val_next)

    if reserve_neighbor:
        val_prev2 = det_matrix[rng, _wrap(prev_idx - 1)]
        val_next2 = det_matrix[rng, _wrap(next_idx + 1)]
        # Larger neighbor of a peak: beats the second neighbor on one side and
        # the immediate neighbor on the other.
        neighbor_of_next = (val_here > val_next2) & (val_here > val_prev)
        neighbor_of_prev = (val_here > val_prev2) & (val_here > val_next)
        keep = keep | neighbor_of_next | neighbor_of_prev

    return det_obj2_d_raw[keep]
116 |
--------------------------------------------------------------------------------
/dsp/range_processing.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
14 | from . import utils
15 | from . import ZoomFFT
16 |
def range_resolution(num_adc_samples, dig_out_sample_rate=2500, freq_slope_const=60.012):
    """ Calculate the range resolution for the given radar configuration

    Args:
        num_adc_samples (int): The number of given ADC samples in a chirp
        dig_out_sample_rate (int): The ADC sample rate, in ksps
        freq_slope_const (float): The slope of the freq increase in each chirp, in MHz/usec

    Returns:
        tuple [float, float]:
            range_resolution (float): The range resolution for this bin, in meters
            band_width (float): The bandwidth of the radar chirp config, in Hz
    """
    SPEED_OF_LIGHT_M_PER_SEC = 299792458
    # Active ADC sampling period of one chirp, in microseconds.
    sampling_period_usec = 1000.0 / dig_out_sample_rate * num_adc_samples
    # MHz/usec * usec gives MHz; scale by 1e6 to express the bandwidth in Hz.
    band_width = freq_slope_const * sampling_period_usec * 1e6
    return SPEED_OF_LIGHT_M_PER_SEC / (2.0 * band_width), band_width
37 |
38 |
def range_processing(adc_data, window_type_1d=None, axis=-1, n=64):
    """Perform 1D FFT on complex-format ADC data.

    Perform optional windowing and 1D FFT on the ADC data.

    Args:
        adc_data (ndarray): (num_chirps_per_frame, num_rx_antennas, num_adc_samples). Performed on each frame. adc_data
            is in complex by default. Complex is float32/float32 by default.
        window_type_1d (mmwave.dsp.utils.Window): Optional window type on 1D FFT input. Default is None. Can be selected
            from Bartlett, Blackman, Hanning and Hamming.
        axis (int): Axis along which the FFT is computed (the ADC-sample axis).
        n (int): FFT length, i.e. the number of output range bins. Defaults to
            64, preserving the previously hard-coded value; pass None to use
            the input length along `axis`.

    Returns:
        radar_cube (ndarray): (num_chirps_per_frame, num_rx_antennas, n). Also called fft_1d_out
    """
    # Apply the optional window before the FFT to reduce spectral leakage.
    if window_type_1d:
        fft1d_in = utils.windowing(adc_data, window_type_1d, axis=axis)
    else:
        fft1d_in = adc_data

    # np.fft.fft is a 1D operation; higher-dimensional input is transformed
    # along `axis`. The FFT length was previously hard-coded to 64 — it is now
    # the `n` parameter (same default, so existing callers are unaffected).
    radar_cube = np.fft.fft(fft1d_in, n=n, axis=axis)

    return radar_cube
66 |
67 |
def zoom_range_processing(adc_data, low_freq, high_freq, fs, d, resample_number):
    """Perform ZoomFFT on complex-format ADC data in a user-defined frequency range.

    Runs one ZoomFFT per (chirp, antenna) pair over the last axis of the cube.

    Args:
        adc_data (ndarray): (num_chirps_per_frame, num_rx_antennas, num_adc_samples). Performed on each frame. adc_data
            is in complex by default. Complex is float32/float32 by default.
        low_freq (int): lower bound of the frequency band to zoom on.
        high_freq (int): upper bound of the frequency band to zoom on.
        fs (int): sampling rate of the original signal.
        d (int): Sample spacing (inverse of the sampling rate).
            NOTE(review): accepted for API compatibility but not used here.
        resample_number (int): The number of samples in the re-sampled signal.

    Returns:
        zoom_fft_spectrum (ndarray): (num_chirps_per_frame, num_rx_antennas, resample_number).
    """
    n_chirps, n_ants = adc_data.shape[0], adc_data.shape[1]

    zoom_fft_spectrum = np.zeros(shape=(n_chirps, n_ants, resample_number))

    for chirp_idx in range(n_chirps):
        for ant_idx in range(n_ants):
            zfft = ZoomFFT.ZoomFFT(low_freq, high_freq, fs, adc_data[chirp_idx, ant_idx, :])
            zfft.compute_fft()
            zoom_fft_spectrum[chirp_idx, ant_idx, :] = zfft.compute_zoomfft()

    return zoom_fft_spectrum
99 |
100 |
def zoom_fft_visualize(zoom_fft_spectrum, antenna_idx, range_bin_idx):
    '''Placeholder for visualizing one ZoomFFT spectrum slice; to be implemented.'''
    pass
104 |
--------------------------------------------------------------------------------
/dsp/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | # ==============================================================================
12 |
13 | import numpy as np
# Python < 3.4 has no stdlib `enum`; fall back to a plain class below.
try:
    from enum import Enum
except ImportError:
    print("enum only exists in Python 3.4 or newer")

try:
    # FFT window types selectable by windowing().
    class Window(Enum):
        BARTLETT = 1
        BLACKMAN = 2
        HAMMING = 3
        HANNING = 4
except NameError:
    # `Enum` is undefined when the import above failed: plain constants with
    # the same names keep the public API identical.
    class Window:
        BARTLETT = 1
        BLACKMAN = 2
        HAMMING = 3
        HANNING = 4

# Column indices of a detection tuple (range, doppler, peak value).
RANGEIDX = 0
DOPPLERIDX = 1
PEAKVAL = 2

# Maximum number of detected objects reported.
MAX_OBJ_OUT = 100

def windowing(input, window_type, axis=0):
    """Window the input based on given window type.

    Args:
        input (ndarray): input numpy array to be windowed.
        window_type (Window): enum chosen between Bartlett, Blackman, Hamming and Hanning.
        axis (int): the axis along which the windowing will be applied.

    Returns:
        ndarray: `input` multiplied element-wise by the window along `axis`.

    Raises:
        ValueError: if `window_type` is not a supported Window member.
    """
    # NOTE(review): callers are expected to pass data as
    # (num_chirps_per_frame, num_rx_antennas, num_adc_samples) — i.e. the
    # windowed (sample) axis last — per the original author's note.
    window_length = input.shape[axis]
    if window_type == Window.BARTLETT:
        window = np.bartlett(window_length)
    elif window_type == Window.BLACKMAN:
        window = np.blackman(window_length)
    elif window_type == Window.HAMMING:
        window = np.hamming(window_length)
    elif window_type == Window.HANNING:
        window = np.hanning(window_length)
    else:
        raise ValueError("The specified window is not supported!!!")
    # BUG FIX: the window must broadcast along the requested axis. The previous
    # `input * window` always multiplied along the LAST axis regardless of
    # `axis` (identical behavior is preserved for axis=-1 and 1D input).
    bcast_shape = [1] * input.ndim
    bcast_shape[axis % input.ndim] = window_length
    output = input * window.reshape(bcast_shape)
    return output
68 |
69 |
--------------------------------------------------------------------------------
/firmware/xwr1843_mmw_demo.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/firmware/xwr1843_mmw_demo.bin
--------------------------------------------------------------------------------
/firmware/xwr6443_mmw_demo.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/firmware/xwr6443_mmw_demo.bin
--------------------------------------------------------------------------------
/firmware/xwr6843AOP_mmw_demo.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/firmware/xwr6843AOP_mmw_demo.bin
--------------------------------------------------------------------------------
/firmware/xwr6843ISK_mmw_demo.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/firmware/xwr6843ISK_mmw_demo.bin
--------------------------------------------------------------------------------
/firmware/xwr6843ODS_mmw_demo.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/firmware/xwr6843ODS_mmw_demo.bin
--------------------------------------------------------------------------------
/gesture_icons/0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/0.jpg
--------------------------------------------------------------------------------
/gesture_icons/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/1.jpg
--------------------------------------------------------------------------------
/gesture_icons/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/2.jpg
--------------------------------------------------------------------------------
/gesture_icons/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/3.jpg
--------------------------------------------------------------------------------
/gesture_icons/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/4.jpg
--------------------------------------------------------------------------------
/gesture_icons/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/5.jpg
--------------------------------------------------------------------------------
/gesture_icons/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/6.jpg
--------------------------------------------------------------------------------
/gesture_icons/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/gesture_icons/7.jpg
--------------------------------------------------------------------------------
/globalvar.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | def _init():
5 | global _global_dict
6 | _global_dict = {}
7 |
8 | def set_value(name, value):
9 | _global_dict[name] = value
10 |
11 | def get_value(name, defValue=None):
12 | try:
13 | return _global_dict[name]
14 | except KeyError:
15 | return defValue
16 |
--------------------------------------------------------------------------------
/img/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/1.png
--------------------------------------------------------------------------------
/img/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/2.png
--------------------------------------------------------------------------------
/img/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/3.png
--------------------------------------------------------------------------------
/img/4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/4.gif
--------------------------------------------------------------------------------
/img/5.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/5.mp4
--------------------------------------------------------------------------------
/img/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/6.jpg
--------------------------------------------------------------------------------
/img/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/img/7.jpg
--------------------------------------------------------------------------------
/iwr6843_tlv/detected_points.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import numpy as np
3 | import time
4 | import struct
5 |
DEBUG=False
# Frame-sync magic word the mmw demo firmware emits at the start of each frame,
# as an integer array and as the raw byte pattern searched for in the stream.
MAGIC_WORD_ARRAY = np.array([2, 1, 4, 3, 6, 5, 8, 7])
MAGIC_WORD = b'\x02\x01\x04\x03\x06\x05\x08\x07'
MSG_AZIMUT_STATIC_HEAT_MAP = 8  # TLV type id of the azimuth static heat map
10 |
11 |
class IWR6843AOP_TLV:
    """Parser for the TI mmWave demo firmware TLV output stream (IWR6843AOP).

    Holds the serial/antenna configuration, derives radar parameters from a
    demo .cfg file, and decodes TLV messages (detected points, point side
    info, azimuth static heat map) from the raw byte stream.
    """

    def __init__(self, sdk_version=3.4, cli_baud=115200,data_baud=921600, num_rx=4, num_tx=3,
                 verbose=False, connect=True, mode=0,cli_loc='COM9',data_loc='COM10',config_file=""):
        """Record the connection and antenna configuration.

        Args:
            sdk_version (float): mmWave SDK version of the firmware.
            cli_baud (int): baud rate of the CLI (configuration) port.
            data_baud (int): baud rate of the data port.
            num_rx (int): number of RX antennas.
            num_tx (int): number of TX antennas.
            verbose (bool): verbosity flag (not read elsewhere in this class).
            connect (bool): when True, mark the instance as connected.
                NOTE(review): the serial.Serial calls are commented out, so no
                port is actually opened here — confirm where ports get opened.
            mode (int): operating-mode flag.
            cli_loc (str): CLI serial port name.
            data_loc (str): data serial port name.
            config_file (str): path to the demo .cfg file.
        """
        super(IWR6843AOP_TLV, self).__init__()
        self.connected = False
        self.verbose = verbose
        self.mode = mode
        if connect:
            # self.cli_port = serial.Serial(cli_loc, cli_baud)
            # self.data_port = serial.Serial(data_loc, data_baud)
            self.connected = True
        self.sdk_version = sdk_version
        self.num_rx_ant = num_rx
        self.num_tx_ant = num_tx
        # Virtual array size for TDM-MIMO: RX x TX.
        self.num_virtual_ant = num_rx * num_tx
        self.config_file = config_file
        # if mode == 0:
        #     self._initialize(self.config_file)

    def _configure_radar(self, config):
        """Send each CLI configuration line to the radar, pacing writes by 10 ms."""
        for i in config:
            self.cli_port.write((i + '\n').encode())
            time.sleep(0.01)

    def _initialize(self, config_file):
        """Parse the demo .cfg file and derive radar parameters.

        Reads the profileCfg/frameCfg lines and fills self.config_params with
        numDopplerBins, numRangeBins, range/doppler resolutions, maxRange and
        maxVelocity (mmw demo formulas).

        Returns:
            dict: the populated self.config_params.
        """
        config = [line.rstrip('\r\n') for line in open(config_file)]
        if self.connected:
            pass
            # self._configure_radar(config)

        self.config_params = {}  # Derived configuration parameters

        for i in config:
            split_words = i.split(" ")

            # Hard-coded antenna counts; change if another layout is used.
            num_rx_ant = 4
            num_tx_ant = 3

            # Profile configuration: chirp timing and sampling parameters.
            if "profileCfg" in split_words[0]:
                start_freq = int(split_words[2])           # GHz
                idle_time = int(split_words[3])            # usec
                ramp_end_time = float(split_words[5])      # usec
                freq_slope_const = int(split_words[8])     # MHz/usec
                num_adc_samples = int(split_words[10])
                # Round the ADC sample count up to the next power of two:
                # that is the number of range bins.
                num_adc_samples_round_to2 = 1
                while num_adc_samples > num_adc_samples_round_to2:
                    num_adc_samples_round_to2 = num_adc_samples_round_to2 * 2

                dig_out_sample_rate = int(split_words[11])  # ksps

            # Frame configuration: chirp loop structure and frame timing.
            elif "frameCfg" in split_words[0]:
                chirp_start_idx = int(split_words[1])
                chirp_end_idx = int(split_words[2])
                num_loops = int(split_words[3])
                num_frames = int(split_words[4])
                frame_periodicity = float(split_words[5])

        # Combine the read data to obtain the configuration parameters.
        num_chirps_per_frame = (chirp_end_idx - chirp_start_idx + 1) * num_loops
        # NOTE(review): true division — numDopplerBins is stored as a float.
        self.config_params["numDopplerBins"] = num_chirps_per_frame / num_tx_ant
        self.config_params["numRangeBins"] = num_adc_samples_round_to2
        self.config_params["rangeResolutionMeters"] = round((3e8 * dig_out_sample_rate * 1e3) / (
                2 * freq_slope_const * 1e12 * num_adc_samples), 2)
        self.config_params["rangeIdxToMeters"] = (3e8 * dig_out_sample_rate * 1e3) / (
                2 * freq_slope_const * 1e12 * self.config_params["numRangeBins"])
        self.config_params["dopplerResolutionMps"] = round(3e8 / (
                2 * start_freq * 1e9 * (idle_time + ramp_end_time) * 1e-6 * self.config_params[
            "numDopplerBins"] * num_tx_ant), 2)
        self.config_params["maxRange"] = round((300 * 0.9 * dig_out_sample_rate) / (2 * freq_slope_const * 1e3), 2)
        self.config_params["maxVelocity"] = round(3e8 / (
                4 * start_freq * 1e9 * (idle_time + ramp_end_time) * 1e-6 * num_tx_ant), 2)
        return self.config_params

    def close(self):
        """End connection between radar and machine

        Stops the sensor via the CLI port, then closes both serial ports.

        Returns:
            None
        """
        self.cli_port.write('sensorStop\n'.encode())
        self.cli_port.close()
        self.data_port.close()

    def _read_buffer(self):
        """Read and return all bytes currently waiting on the data port.

        Returns:
            bytes: whatever is in the data port's input buffer right now.
        """
        byte_buffer = self.data_port.read(self.data_port.in_waiting)

        return byte_buffer

    def _parse_header_data(self, byte_buffer, idx):
        """Parses the byte buffer for the header of the data

        Args:
            byte_buffer: Buffer with TLV data
            idx: Current reading index of the byte buffer

        Returns:
            Tuple [Tuple (int), int]: the 8 header fields (version, length,
            platform, frame_num, cpu_cycles, num_obj, num_tlvs, subframe_num)
            and the index just past the header.
        """
        # The magic word is big-endian; the remaining header fields are
        # native-order uint32.
        magic, idx = self._unpack(byte_buffer, idx, order='>', items=1, form='Q')
        (version, length, platform, frame_num, cpu_cycles, num_obj, num_tlvs), idx = self._unpack(byte_buffer, idx,
                                                                                                 items=7, form='I')
        subframe_num, idx = self._unpack(byte_buffer, idx, items=1, form='I')
        return (version, length, platform, frame_num, cpu_cycles, num_obj, num_tlvs, subframe_num), idx

    def _parse_header_tlv(self, byte_buffer, idx):
        """Parse the (type, length) header of a single TLV."""
        (tlv_type, tlv_length), idx = self._unpack(byte_buffer, idx, items=2, form='I')
        return (tlv_type, tlv_length), idx

    def _parse_msg_detected_points(self, byte_buffer, idx):
        """Parse one detected point: (x, y, z, velocity) as four float32."""
        (x, y, z, vel), idx = self._unpack(byte_buffer, idx, items=4, form='f')

        return (x, y, z, vel), idx

    def _parse_msg_detected_points_side_info(self, byte_buffer, idx):
        """Parse one point's side info: (snr, noise) as two uint16."""
        (snr, noise), idx = self._unpack(byte_buffer, idx, items=2, form='H')
        return (snr, noise), idx

    def _parse_msg_azimut_static_heat_map(self, byte_buffer, idx):
        """Parse one (imag, real) pair of the azimuth static heat map.

        NOTE(review): when _unpack fails it returns None and this method then
        implicitly returns None too, which the caller unpacks blindly —
        confirm intended error handling.
        """
        try:
            (imag, real), idx = self._unpack(byte_buffer, idx, items=2, form='H')
            return (imag, real), idx
        except TypeError:
            pass

    def _process_azimut_heat_map(self, byte_buffer):
        """Decode the azimuth static heat map TLV of one frame.

        Returns:
            np.ndarray: (num_virtual_ant, numRangeBins, 2) int16 array of
            (imag, real) pairs.
            NOTE(review): values are unpacked as unsigned ('H') but stored in
            an int16 array — verify sign handling against the demo format.
        """
        idx = byte_buffer.index(MAGIC_WORD)
        header_data, idx = self._parse_header_data(byte_buffer, idx)
        (tlv_type, tlv_length), idx = self._parse_header_tlv(byte_buffer, idx)
        azimuth_map = np.zeros((self.num_virtual_ant, self.config_params['numRangeBins'], 2), dtype=np.int16)
        for bin_idx in range(self.config_params['numRangeBins']):
            for ant in range(self.num_virtual_ant):
                azimuth_map[ant][bin_idx][:], idx = self._parse_msg_azimut_static_heat_map(byte_buffer, idx)
        return azimuth_map

    def _process_detected_points(self, byte_buffer):
        """Decode the point-cloud TLVs of one frame into an (N, 6) array.

        Columns: x, y, z, velocity (TLV 1; 16 bytes per point) and
        SNR, noise (TLV 2 side info).
        """
        idx = byte_buffer.index(MAGIC_WORD)
        header_data, idx = self._parse_header_data(byte_buffer, idx)

        num_tlvs = header_data[6]

        #### TLV 1: detected points ####
        (tlv_type, tlv_length), idx = self._parse_header_tlv(byte_buffer, idx)
        num_points = int(tlv_length / 16)
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # np.float64 is what the old alias meant.
        data = np.zeros((num_points, 6), dtype=np.float64)
        for i in range(num_points):
            (x, y, z, vel), idx = self._parse_msg_detected_points(byte_buffer, idx)
            data[i][0] = x
            data[i][1] = y
            data[i][2] = z
            data[i][3] = vel

        #### TLV 2: per-point side info ####
        (tlv_type, tlv_length), idx = self._parse_header_tlv(byte_buffer, idx)
        for i in range(num_points):
            (snr, noise), idx = self._parse_msg_detected_points_side_info(byte_buffer, idx)
            data[i][4] = snr
            data[i][5] = noise

        return data

    @staticmethod
    def _unpack(byte_buffer, idx, order='', items=1, form='I'):
        """Helper function for parsing binary byte data

        Args:
            byte_buffer: Buffer with data
            idx: Current index in the buffer
            order: Little endian or big endian (struct byte-order prefix)
            items: Number of items to be extracted
            form: Data type to be extracted

        Returns:
            Tuple [Tuple (object), int]: the unpacked value (scalar when
            items == 1, else a tuple) and the advanced index; or None when
            unpacking fails (e.g. truncated buffer).
        """
        size = {'H': 2, 'h': 2, 'I': 4, 'Q': 8, 'f': 4}
        try:
            data = struct.unpack(order + str(items) + form, byte_buffer[idx:idx + (items * size[form])])
            if len(data) == 1:
                data = data[0]
            return data, idx + (items * size[form])
        except:
            # NOTE(review): bare except silences all errors and returns None,
            # which callers unpack blindly (surfacing as TypeError upstream) —
            # consider narrowing to struct.error.
            return None
230 |
231 |
--------------------------------------------------------------------------------
/libs/UDPCAPTUREADCRAWDATA.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/libs/UDPCAPTUREADCRAWDATA.dll
--------------------------------------------------------------------------------
/libs/UDPCAPTUREADCRAWDATA.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/libs/UDPCAPTUREADCRAWDATA.lib
--------------------------------------------------------------------------------
/libs/libtest.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tkwer/RadarStream/f6fe0fbf7d4a20cb1198779c97d2c3f69e7bfc17/libs/libtest.so
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from real_time_process import UdpListener, DataProcessor
2 | from radar_config import SerialConfig
3 | from radar_config import DCA1000Config
4 | from queue import Queue
5 | import pyqtgraph as pg
6 | from PyQt5 import QtWidgets
7 | from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
8 | import time
9 | import torch
10 | import sys
11 | import numpy as np
12 | from serial.tools import list_ports
13 | import iwr6843_tlv.detected_points as readpoint
14 | import globalvar as gl
15 | # import models.predict as predict
16 | # from models.model import CNet, FeatureFusionNet
17 | import os
18 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
19 | import matplotlib.pyplot as plt
20 | from colortrans import pg_get_cmap
21 |
22 | # -----------------------------------------------
23 | from UI_interface import Ui_MainWindow, Qt_pet
24 | # -----------------------------------------------
25 |
26 |
# Root folder where captured gesture feature datasets are stored.
datasetfile = 'dataset'
# Current capture folder; rebuilt by savedatasetsencefile().
# NOTE(review): initialized to ' ' (a single space) but later compared
# against '' in update_figure — confirm that initial guard behavior is intended.
datasetsencefile = ' '
# Class index (as a string) -> gesture label; index 7 doubles as the idle icon.
gesturedict = {
    '0':'backward',
    '1':'dbclick',
    '2':'down',
    '3':'front',
    '4':'Left',
    '5':'Right',
    '6':'up',
    '7':'NO'
}

cnt = 0  # tick counter (re-initialized below for the icon-display timer)

_flagdisplay = False  # True while a recognized-gesture icon is on screen
43 |
def loadmodel():
    """Load the checkpoint selected in the model combo box (mapped to CPU)."""
    global model
    selection = modelfile.currentText()
    if selection != '' and selection != '--select--':
        model_info = torch.load(selection, map_location='cpu')
        # TODO: instantiate the real network object before loading its weights.
        model = []
        model.load_state_dict(model_info['state_dict'])
        printlog('加载' + selection + '模型成功!', fontcolor='blue')
    else:
        printlog("请加载模型!", fontcolor='red')
54 |
def cleartjpg():
    """Restore the idle gesture icon (index 7 = 'NO') on both windows."""
    idle_icon = "gesture_icons/" + str(7) + ".jpg"
    view_gesture.setPixmap(QtGui.QPixmap(idle_icon))
    subWin.img_update(idle_icon)
58 |
def Judge_gesture(a, b, c, d, e):
    """Run gesture inference on the five feature maps and show the result icon.

    Args (order matches the caller in update_figure):
        a: RT (range-time) feature.
        b: DT (doppler-time) feature.
        c: RDT (range-doppler-time) feature.
        d: ART (azimuth-range-time) feature.
        e: ERT (elevation-range-time) feature.

    Returns:
        The recognized gesture label string; the idle label ('NO') when no
        model is loaded.
    """
    global _flagdisplay
    if model:
        # TODO: restore the real classifier call:
        #   fanhui = predict.predictGesture(model, d, b, e, c, a)
        # NOTE(review): str(fanhui) only yields a valid icon/dict key once
        # predictGesture returns an integer class index.
        fanhui = []
        view_gesture.setPixmap(QtGui.QPixmap("gesture_icons/"+str(fanhui)+".jpg"))
        subWin.img_update("gesture_icons/"+str(fanhui)+".jpg")
        QtCore.QTimer.singleShot(2000, cleartjpg)
        _flagdisplay = True
        printlog("输出:" + gesturedict[str(fanhui)], fontcolor='blue')
        return gesturedict[str(fanhui)]
    # Bug fix: previously fell through and returned None, which crashed the
    # caller when it concatenated the result into a log string.
    return gesturedict[str(7)]
70 |
def update_figure():
    """Periodic GUI refresh: pull one processed frame from each queue, update
    the five image views, then recognize or record features when requested.

    Re-arms itself via a 1 ms single-shot timer; the queue .get() calls block
    until DataProcessor publishes a frame, which paces this loop.
    """
    global img_rdi, img_rai, img_rti, img_rei, img_dti
    global idx,cnt

    # Display slices and level ranges below are empirically tuned.
    # NOTE(review): assumes the RTI frame has >= 1024 rows — confirm against DSP.RDA_Time.
    img_rti.setImage(RTIData.get().sum(2)[0:1024:16,:], levels=[0, 1e4])
    # img_rdi.setImage(RDIData.get()[:, :, 0].T, levels=[30, 50])
    img_rdi.setImage(RDIData.get().sum(0)[:, :, 0].T,levels=[2e4, 4e5])
    # img_rei.setImage(REIData.get().T,levels=[0, 3])
    img_rei.setImage(REIData.get()[4:12,:,:].sum(0).T,levels=[0, 8])
    img_dti.setImage(DTIData.get(),levels=[0, 1000])
    # img_rai.setImage(RAIData.get().sum(0).T, levels=[1.2e3, 4e6])
    # img_rai.setImage(RAIData.get()[0,:,:].T, levels=[8e3, 2e4])
    # img_rai.setImage(RAIData.get(),levels=[0, 3])
    img_rai.setImage(RAIData.get()[4:12,:,:].sum(0),levels=[0, 8])


    # 'usr_gesture' flag is set elsewhere (not in this file) — presumably by
    # the gesture-trigger logic; cleared below after one recognition/capture.
    if gl.get_value('usr_gesture'):
        RT_feature = RTIData.get().sum(2)[0:1024:16,:]
        DT_feature = DTIData.get()
        RDT_feature = RDIData.get()[:, :, :, 0]
        ART_feature = RAIData.get()
        ERT_feature = REIData.get()

        # if Recognizebtn.isChecked():
        if Recognizebtn.isChecked():
            # recognition path

            time_start = time.time() # record start time
            result = Judge_gesture(RT_feature,DT_feature,RDT_feature,
                            ART_feature,ERT_feature)
            time_end = time.time() # record end time
            time_sum = time_end - time_start # elapsed time of the recognition call, in seconds
            printlog('识别时间:'+str(time_sum)+'s, '+'识别结果:'+result,fontcolor='blue')


        elif CaptureDatabtn.isChecked() and datasetsencefile != '':
            idx=idx+1
            # capture path: persist the five feature maps to the dataset folder
            np.save(datasetsencefile+'/RT_feature_'+str(idx).zfill(5)+'.npy',RT_feature)
            np.save(datasetsencefile+'/DT_feature_'+str(idx).zfill(5)+'.npy',DT_feature)
            np.save(datasetsencefile+'/RDT_feature_'+str(idx).zfill(5)+'.npy',RDT_feature)
            np.save(datasetsencefile+'/ART_feature_'+str(idx).zfill(5)+'.npy',ART_feature)
            np.save(datasetsencefile+'/ERT_feature_'+str(idx).zfill(5)+'.npy',ERT_feature)
            printlog('采集到特征:'+datasetfilebox.currentText()+'-'+str(idx).zfill(5),fontcolor='blue')

        gl.set_value('usr_gesture', False)

    QtCore.QTimer.singleShot(1, update_figure)
119 |
120 |
def printlog(string, fontcolor):
    """Append a timestamped, colored line to the log pane.

    Args:
        string: Message text.
        fontcolor: Color name understood by Qt rich text (e.g. 'red', 'blue').
    """
    logtxt.moveCursor(QtGui.QTextCursor.End)
    gettime = time.strftime("%H:%M:%S", time.localtime())
    # Fix: fontcolor was accepted but never applied (the empty ""+...+""
    # wrappers suggest markup was lost); QTextEdit.append() renders rich
    # text, so wrap the line in a <font> tag to honor the color.
    logtxt.append('<font color="' + fontcolor + '">' + str(gettime) + "-->" + string + '</font>')
125 |
def getradarparameters():
    """Parse the selected radar .cfg file and display the derived parameters."""
    if radarparameters.currentIndex() > -1 and radarparameters.currentText() != '--select--':
        radarparameters.setToolTip(radarparameters.currentText())
        configParameters = readpoint.IWR6843AOP_TLV()._initialize(config_file = radarparameters.currentText())
        # Fix: rangeResolutionMeters is in meters (per its key name); the
        # label previously claimed 'cm'.
        rangeResolutionlabel.setText(str(configParameters["rangeResolutionMeters"])+'m')
        dopplerResolutionlabel.setText(str(configParameters["dopplerResolutionMps"])+'m/s')
        maxRangelabel.setText(str(configParameters["maxRange"])+'m')
        maxVelocitylabel.setText(str(configParameters["maxVelocity"])+'m/s')
134 |
def openradar(config,com):
    """Configure and start the radar, then kick off processing and display.

    Args:
        config: Path to the radar .cfg file to send over the CLI port.
        com: CLI serial port name (e.g. 'COM3').
    """
    global radar_ctrl
    radar_ctrl = SerialConfig(name='ConnectRadar', CLIPort=com, BaudRate=115200)
    # Stop any running session before pushing the new configuration.
    radar_ctrl.StopRadar()
    radar_ctrl.SendConfig(config)
    processor.start()
    # Wait up to 1 s for the processor thread before entering the display loop.
    processor.join(timeout=1)
    update_figure()
143 |
def updatacomstatus(cbox):
    """Repopulate *cbox* with the names of currently attached serial ports.

    Args:
        cbox: Qt combo box to refresh.
    """
    cbox.clear()
    # Idiomatic iteration instead of range(len(...)); port[0] is the device name.
    for port in list_ports.comports():
        cbox.addItem(str(port[0]))
149 |
def setserialport(cbox, com):
    """Remember the port selected in *cbox* as the CLI or data port name.

    Args:
        cbox: Combo box holding serial port names.
        com: "CLI" to store the CLI port; anything else stores the data port.
    """
    global CLIport_name
    global Dataport_name
    if cbox.currentIndex() <= -1:
        return
    selected = cbox.currentText()
    if com == "CLI":
        CLIport_name = selected
    else:
        Dataport_name = selected
160 |
def sendconfigfunc():
    """Send the selected radar config over the chosen CLI serial port."""
    global CLIport_name
    global Dataport_name
    cfg_choice = radarparameters.currentText()
    if len(CLIport_name) != 0 and cfg_choice != '--select--':
        openradar(cfg_choice, CLIport_name)
        printlog(string='发送成功', fontcolor='green')
    else:
        printlog(string='发送失败', fontcolor='red')
169 |
170 |
def setintervaltime():
    """Raise the shared 2-second trigger flag and re-arm this timer."""
    QtCore.QTimer.singleShot(2000, setintervaltime)
    gl.set_value('timer_2s', True)
174 |
# cnt counts 200 ms ticks: the icon stays visible for cnt * 200 ms.
cnt = 0
def setdisplaygestureicontime():
    """Poll every 200 ms; ~1 s after a gesture icon appears, restore the idle icon."""
    global _flagdisplay, cnt
    # Idiom fix: test the flag by truthiness instead of '== True'.
    if _flagdisplay:
        cnt = cnt + 1
        if cnt > 4:
            cnt = 0
            view_gesture.setPixmap(QtGui.QPixmap("gesture_icons/"+str(7)+".jpg"))
            subWin.img_update("gesture_icons/"+str(7)+".jpg")
            _flagdisplay = False
    QtCore.QTimer.singleShot(200, setdisplaygestureicontime)
187 |
def setcolor():
    """Apply the colormap chosen in the combo box to all five image views."""
    choice = color_.currentText()
    if choice == '--select--' or choice == '':
        return
    if choice == 'customize':
        pgColormap = pg_get_cmap(choice)
    else:
        pgColormap = pg_get_cmap(plt.cm.get_cmap(choice))
    lut = pgColormap.getLookupTable(0.0, 1.0, 256)
    for image_item in (img_rdi, img_rai, img_rti, img_dti, img_rei):
        image_item.setLookupTable(lut)
201 |
def get_filelist(dir, Filelist):
    """Recursively collect feature .npy files into five prefix buckets.

    Args:
        dir: File or directory path to scan (careful: may be either!).
        Filelist: List of five lists filled in place:
            [0]=DT, [1]=RT, [2]=RDT, [3]=ART, [4]=ERT feature files.

    Returns:
        The same Filelist object, for call-chaining convenience.
    """
    if os.path.isfile(dir):
        name = os.path.basename(dir)
        if name.endswith('.npy'):
            # Prefix -> bucket index. Check order is safe: 'RDT'/'ART'/'ERT'
            # do not start with 'DT' or 'RT', so buckets never collide.
            for bucket, prefix in enumerate(('DT', 'RT', 'RDT', 'ART', 'ERT')):
                if name.startswith(prefix):
                    Filelist[bucket].append(dir)
                    break
    elif os.path.isdir(dir):
        for entry in os.listdir(dir):
            get_filelist(os.path.join(dir, entry), Filelist)
    return Filelist
222 |
def savedatasetsencefile():
    """Ensure the per-user/per-gesture capture folder exists and resume the index."""
    global datasetsencefile, start_captureidx, idx
    datasetsencefile = '/'.join(
        [datasetfile, whodatafile.text(), datasetfilebox.currentText()])
    # Create the folder on first use.
    if not os.path.exists(datasetsencefile):
        os.makedirs(datasetsencefile)

    collected = get_filelist(datasetsencefile, [[] for _ in range(5)])
    # Resume numbering after the DT features already on disk.
    start_captureidx = len(collected[0])
    idx = start_captureidx
232 |
233 |
def show_sub():
    """Switch to the floating 'pet' widget: show it and hide the main window."""
    subWin.show()
    MainWindow.hide()
237 |
238 |
239 |
def application():
    """Construct the Qt GUI, wire all signals, and run the event loop.

    Blocks until the window closes; afterwards makes a best-effort attempt
    to stop the radar if a CLI port was opened during the session.
    """
    global color_,radarparameters,maxVelocitylabel,maxRangelabel,dopplerResolutionlabel,rangeResolutionlabel,logtxt
    global Recognizebtn,CaptureDatabtn,view_gesture,modelfile,datasetfilebox,whodatafile
    global img_rdi, img_rai, img_rti, img_rei, img_dti,ui
    global subWin,MainWindow
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    MainWindow.show()
    ui = Ui_MainWindow()

    ui.setupUi(MainWindow)
    subWin = Qt_pet(MainWindow)

    # NOTE: the author locally patched pyqtgraph's ViewBox.py (around line 919),
    # changing `padding = self.suggestPadding(ax)` to `padding = 0`,
    # so the image views render without margins.
    view_rdi = ui.graphicsView_6.addViewBox()
    ui.graphicsView_6.setCentralWidget(view_rdi)# remove border padding
    view_rai = ui.graphicsView_4.addViewBox()
    ui.graphicsView_4.setCentralWidget(view_rai)# remove border padding
    view_rti = ui.graphicsView.addViewBox()
    ui.graphicsView.setCentralWidget(view_rti)# remove border padding
    view_dti = ui.graphicsView_2.addViewBox()
    ui.graphicsView_2.setCentralWidget(view_dti)# remove border padding
    view_rei = ui.graphicsView_3.addViewBox()
    ui.graphicsView_3.setCentralWidget(view_rei)# remove border padding

    view_gesture = ui.graphicsView_5
    view_gesture.setPixmap(QtGui.QPixmap("gesture_icons/7.jpg"))

    # Buttons
    sendcfgbtn = ui.pushButton_11
    exitbtn = ui.pushButton_12
    Recognizebtn = ui.pushButton_15
    CaptureDatabtn = ui.pushButton

    # Combo boxes
    color_ = ui.comboBox
    modelfile = ui.comboBox_2
    datasetfilebox = ui.comboBox_3
    radarparameters = ui.comboBox_7
    Cliportbox = ui.comboBox_8

    logtxt = ui.textEdit
    whodatafile = ui.lineEdit_6
    changepage = ui.actionload


    # Labels for parameters derived from the selected radar config
    rangeResolutionlabel = ui.label_14
    dopplerResolutionlabel = ui.label_35
    maxRangelabel = ui.label_16
    maxVelocitylabel = ui.label_37

    # ---------------------------------------------------
    # lock the aspect ratio so pixels are always square
    # view_rai.setAspectLocked(True)
    # view_rti.setAspectLocked(True)
    img_rdi = pg.ImageItem(border=None)
    img_rai = pg.ImageItem(border=None)
    img_rti = pg.ImageItem(border=None)
    img_dti = pg.ImageItem(border=None)
    img_rei = pg.ImageItem(border=None)

    # Colormap (default: project-defined 'customize' map)
    pgColormap = pg_get_cmap('customize')
    lookup_table = pgColormap.getLookupTable(0.0, 1.0, 256)
    img_rdi.setLookupTable(lookup_table)
    img_rai.setLookupTable(lookup_table)
    img_rti.setLookupTable(lookup_table)
    img_dti.setLookupTable(lookup_table)
    img_rei.setLookupTable(lookup_table)

    view_rdi.addItem(img_rdi)
    view_rai.addItem(img_rai)
    view_rti.addItem(img_rti)
    view_dti.addItem(img_dti)
    view_rei.addItem(img_rei)


    # Signal wiring: serial port selection, colormap, model, dataset and radar config.
    Cliportbox.arrowClicked.connect(lambda:updatacomstatus(Cliportbox))
    Cliportbox.currentIndexChanged.connect(lambda:setserialport(Cliportbox, com = 'CLI'))
    color_.currentIndexChanged.connect(setcolor)
    modelfile.currentIndexChanged.connect(loadmodel)
    radarparameters.currentIndexChanged.connect(getradarparameters)
    datasetfilebox.currentIndexChanged.connect(savedatasetsencefile)
    whodatafile.editingFinished.connect(savedatasetsencefile)
    sendcfgbtn.clicked.connect(sendconfigfunc)
    Recognizebtn.clicked.connect(setintervaltime)
    # Recognizebtn.clicked.connect(setdisplaygestureicontime)
    CaptureDatabtn.clicked.connect(setintervaltime)
    changepage.triggered.connect(show_sub)
    # 2022/2/24: added the miniature floating widget; clean exit no longer works
    exitbtn.clicked.connect(app.instance().exit)

    app.instance().exec_()

    # Best-effort radar shutdown.
    # NOTE(review): the bare except deliberately swallows NameError/serial
    # errors for the case where no radar was ever connected.
    try:
        if radar_ctrl.CLIPort:
            if radar_ctrl.CLIPort.isOpen():
                radar_ctrl.StopRadar()
    except:
        pass
339 |
340 |
if __name__ == '__main__':
    # Queues connecting the capture / processing threads to the GUI
    BinData = Queue() # raw ADC data queue

    # time-series features
    RTIData = Queue() # range-time queue
    DTIData = Queue() # doppler-time queue

    # per-frame (continuous) features
    RDIData = Queue() # range-doppler queue
    RAIData = Queue() # range-azimuth queue
    REIData = Queue() # azimuth-elevation queue

    # Radar config parameters
    NUM_TX = 3
    NUM_RX = 4
    NUM_CHIRPS = 64
    NUM_ADC_SAMPLES = 64

    radar_config = [NUM_ADC_SAMPLES, NUM_CHIRPS, NUM_TX, NUM_RX]
    # Number of I/Q shorts per frame: samples * chirps * TX * RX * 2
    frame_length = NUM_ADC_SAMPLES * NUM_CHIRPS * NUM_TX * NUM_RX * 2

    # config DCA1000 to receive bin data
    dca1000_cfg = DCA1000Config('DCA1000Config',config_address = ('192.168.33.30', 4096),
                FPGA_address_cfg=('192.168.33.180', 4096))

    collector = UdpListener('Listener', BinData, frame_length)
    processor = DataProcessor('Processor', radar_config, BinData, RTIData, DTIData,
                             RDIData, RAIData, REIData)
    collector.start()

    # Blocks in the Qt event loop until the GUI is closed.
    application()

    dca1000_cfg.DCA1000_close()

    collector.join(timeout=1)

    print("Program close")
    sys.exit()
380 |
--------------------------------------------------------------------------------
/radar_config.py:
--------------------------------------------------------------------------------
1 | # collect data from TI DCA1000 EVM
2 |
3 | import serial
4 | import time
5 | import socket
6 |
7 | # Radar EVM setting
class SerialConfig():
    """Serial CLI connection to the TI radar EVM (send config / start / stop)."""

    def __init__(self, name, CLIPort, BaudRate):
        self.name = name
        # Opens the CLI UART immediately; raises serial.SerialException on failure.
        self.CLIPort = serial.Serial(CLIPort, baudrate=BaudRate)

    def close(self):
        """Close the CLI serial port."""
        self.CLIPort.close()

    def SendConfig(self, ConfigFileName):
        """Send every line of a .cfg file to the radar CLI, echoing responses.

        After each line, reads back whatever the firmware prints within the
        timeout window so the console shows its acknowledgement.
        """
        for line in open(ConfigFileName):
            self.CLIPort.write((line.rstrip('\r\n') + '\n').encode())
            print(f"Sent: {line.strip()}")

            # Wait for and read back the response (with a timeout)
            start_time = time.time()
            timeout = 0.1 # read-back window in seconds (original comment claimed 2 s)
            response = b''

            while time.time() - start_time < timeout:
                if self.CLIPort.in_waiting > 0:
                    response += self.CLIPort.read(self.CLIPort.in_waiting)
                time.sleep(0.01) # avoid busy-spinning the CPU

            print(f"Received: {response.decode(errors='ignore').strip()}")
            time.sleep(0.01)

    def StartRadar(self):
        """Issue sensorStart over the CLI."""
        self.CLIPort.write('sensorStart\n'.encode())
        print('sensorStart\n')

    def StopRadar(self):
        """Issue sensorStop over the CLI."""
        self.CLIPort.write('sensorStop\n'.encode())
        print('sensorStop\n')

    def DisconnectRadar(self):
        """Stop the sensor and close the CLI port."""
        self.CLIPort.write('sensorStop\n'.encode())
        self.CLIPort.close()
45 |
46 | # DCA1000
class DCA1000Config():
    """UDP configuration client for the DCA1000 capture card.

    On construction, binds the local config socket and sends the standard
    bring-up sequence (connect, read version, configure FPGA, configure
    packets, start record), leaving recording running until DCA1000_close().
    """

    def __init__(self, name, config_address, FPGA_address_cfg):
        self.name = name
        self.config_address = config_address      # local (ip, port) to bind
        self.FPGA_address_cfg = FPGA_address_cfg  # DCA1000 FPGA (ip, port)
        cmd_order = ['9', 'E', '3', 'B', '5', '6']
        self.sockConfig = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sockConfig.bind(config_address)
        # Send the first five commands; '6' (stop record) is reserved for close().
        for k in range(5):
            # Send the command
            self.sockConfig.sendto(self.send_cmd(cmd_order[k]), FPGA_address_cfg)
            time.sleep(0.1)
            # Read the acknowledgement back on the config port (blocking).
            msg, server = self.sockConfig.recvfrom(2048)
            # print('receive command:', msg.hex())

    def DCA1000_close(self):
        """Stop recording and release the config socket."""
        self.sockConfig.sendto(self.send_cmd('6'), self.FPGA_address_cfg)
        self.sockConfig.close()

    def send_cmd(self, code):
        """Build the raw configuration packet for a DCA1000 command code.

        Packet layout: header + command code + payload size [+ payload] + footer;
        16-bit fields are little-endian, 6-byte payloads are big-endian.

        Args:
            code: One of '9', 'E', '3', 'B', '5', '6'.

        Returns:
            bytes packet, or the string 'NULL' for an unknown code
            (kept for backward compatibility with the original behavior).
        """
        def u16(value):
            # 16-bit little-endian field (framing, command codes, sizes).
            return value.to_bytes(2, byteorder='little', signed=False)

        header = u16(0xA55A)
        footer = u16(0xEEAA)
        size_0 = u16(0x00)
        size_6 = u16(0x06)

        # Fixed 6-byte payloads (big-endian on the wire).
        data_FPGA_config = (0x01020102031e).to_bytes(6, byteorder='big', signed=False)
        data_packet_config = (0xc005350c0000).to_bytes(6, byteorder='big', signed=False)

        # Dict dispatch replaces the original if/elif chain; the unused
        # CODE_1..CODE_D constants were dead code and have been dropped.
        commands = {
            '9': header + u16(0x09) + size_0 + footer,                     # connect to FPGA
            'E': header + u16(0x0E) + size_0 + footer,                     # read FPGA version
            '3': header + u16(0x03) + size_6 + data_FPGA_config + footer,  # configure FPGA
            'B': header + u16(0x0B) + size_6 + data_packet_config + footer,# configure packets
            '5': header + u16(0x05) + size_0 + footer,                     # start record
            '6': header + u16(0x06) + size_0 + footer,                     # stop record
        }
        # print('send command:', ...)
        return commands.get(code, 'NULL')
--------------------------------------------------------------------------------
/real_time_process.py:
--------------------------------------------------------------------------------
1 | import threading as th
2 | import numpy as np
3 | import DSP
4 | from dsp.utils import Window
5 | from ctypes import *
6 |
# This section still requires manual edits to match the radar config;
# pull requests welcome.
dll = cdll.LoadLibrary('realtimeSystem/libs/UDPCAPTUREADCRAWDATA.dll')
# dll = cdll.LoadLibrary('realtimeSystem/dll/libtest.so')

# Frame flag toggled by the DLL (selects which half of the double buffer is fresh).
# Fix: np.int was removed in NumPy 1.24; it was simply an alias for builtin int.
a = np.zeros(1).astype(int)
# The buffer must be at least twice frame_length (double buffering).
# 98304 is derived from the example configuration:
#   adc_sample = 64
#   chirp = 64
#   tx_num = 3
#   rx_num = 4
#   frame_length = adc_sample * chirp * tx_num * rx_num * 2 = 98304
b = np.zeros(98304*2).astype(c_short)

# Expose the numpy buffers to C as int* / short* so the DLL can write into them.
# NOTE(review): the int* cast assumes the array dtype matches c_int width —
# confirm on 64-bit Linux where builtin-int dtype is int64.
a_ctypes_ptr = cast(a.ctypes.data, POINTER(c_int))
b_ctypes_ptr = cast(b.ctypes.data, POINTER(c_short))
26 |
27 |
class UdpListener(th.Thread):
    """Thread that delegates UDP ADC capture to the native DLL.

    The DLL fills the shared double buffer (b_ctypes_ptr) and toggles the
    frame flag (a_ctypes_ptr); the bin_data queue is kept for API
    compatibility but is not used on this capture path.
    """

    def __init__(self, name, bin_data, data_frame_length):
        th.Thread.__init__(self, name=name)
        self.bin_data = bin_data  # queue (unused by the DLL capture path)
        # NOTE(review): presumably the number of I/Q shorts per frame
        # (see frame_length computation in main.py) — confirm against the DLL.
        self.frame_length = data_frame_length

    def run(self):
        global a_ctypes_ptr, b_ctypes_ptr
        # Blocks inside the DLL for the lifetime of the capture.
        dll.captureudp(a_ctypes_ptr, b_ctypes_ptr, self.frame_length)
37 |
38 |
class DataProcessor(th.Thread):
    """Thread that reorders raw TDM-MIMO ADC frames and feeds the DSP queues.

    Polls the DLL's double-buffer frame flag; whenever it toggles, reshapes
    the freshly written half into [chirps, samples, virtual antennas], runs
    the DSP chain and publishes results to the five output queues.
    """

    def __init__(self, name, config, bin_queue, rti_queue, dti_queue, rdi_queue, rai_queue, rei_queue):
        """config = [adc_sample, chirp_num, tx_num, rx_num]."""
        th.Thread.__init__(self, name=name)
        self.adc_sample = config[0]
        self.chirp_num = config[1]
        self.tx_num = config[2]
        self.rx_num = config[3]
        self.bin_queue = bin_queue  # unused on the DLL double-buffer path
        self.rti_queue = rti_queue
        self.dti_queue = dti_queue
        self.rdi_queue = rdi_queue
        self.rai_queue = rai_queue
        self.rei_queue = rei_queue

    def run(self):
        global frame_count
        frame_count = 0
        lastflar = 0
        while True:
            # The DLL toggles a_ctypes_ptr[0] between 0 and 1 after filling one
            # half of the double buffer; process the half it just completed.
            # NOTE(review): 98304 is hard-coded to the default 64x64x3x4 config.
            if(lastflar != a_ctypes_ptr[0]):
                lastflar = a_ctypes_ptr[0]
                data = np.array(
                    b_ctypes_ptr[98304*(1-a_ctypes_ptr[0]):98304*(2-a_ctypes_ptr[0])])
                # Interleaved layout: groups of four shorts [I0, I1, Q0, Q1].
                data = np.reshape(data, [-1, 4])
                data = data[:, 0:2:] + 1j * data[:, 2::]
                # [num_chirps*tx_num, physical_antennas, num_samples]
                data = np.reshape(
                    data, [self.chirp_num * self.tx_num, -1, self.adc_sample])
                # [num_chirps*tx_num, num_samples, physical_antennas]
                data = data.transpose([0, 2, 1])
                # Chirps are TDM-interleaved across the TX antennas; de-interleave.
                # Fix: the slice bound previously used adc_sample*3 with a
                # hard-coded stride of 3 ("192 = 64*3, remember to change"),
                # which only worked because adc_sample == chirp_num == 64.
                total_chirps = self.chirp_num * self.tx_num
                ch1_data = data[0: total_chirps: self.tx_num, :, :]  # TX1 chirps
                ch2_data = data[1: total_chirps: self.tx_num, :, :]  # TX2 chirps
                ch3_data = data[2: total_chirps: self.tx_num, :, :]  # TX3 chirps
                # Virtual-channel order: TX1-RX1..RX4 | TX2-RX1..RX4 | TX3-RX1..RX4
                data = np.concatenate([ch1_data, ch2_data, ch3_data], axis=2)

                frame_count += 1

                rti, rdi, dti = DSP.RDA_Time(
                    data, window_type_1d=Window.HANNING, axis=1)

                # _, rdi = DSP.Range_Doppler(data, mode=2, padding_size=[128, 64])
                rai, rei = DSP.Range_Angle(
                    data, padding_size=[128, 64, 64])
                self.rti_queue.put(rti)
                self.dti_queue.put(dti)
                self.rdi_queue.put(rdi)
                self.rai_queue.put(rai)
                self.rei_queue.put(rei)
97 |
--------------------------------------------------------------------------------