├── models
│   └── readme.txt
├── CMakeLists.txt
├── package.xml
├── launch
│   └── lanoising.launch
├── README.md
└── src
    └── lanoising.py
/models/readme.txt:
--------------------------------------------------------------------------------
put all the models here.
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(lanoising)

find_package(catkin REQUIRED COMPONENTS
  rospy
  sensor_msgs
)

catkin_package(
  CATKIN_DEPENDS rospy sensor_msgs
)
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<package>
  <name>lanoising</name>
  <version>0.0.1</version>
  <description>Noising the point cloud</description>
  <maintainer>Tao Yang</maintainer>
  <license>BSD</license>

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>rospy</build_depend>
  <build_depend>sensor_msgs</build_depend>

  <run_depend>rospy</run_depend>
  <run_depend>sensor_msgs</run_depend>
</package>
--------------------------------------------------------------------------------
/launch/lanoising.launch:
--------------------------------------------------------------------------------
<launch>
  <node name="lanoising" pkg="lanoising" type="lanoising.py" output="screen">
    <!-- Reconstructed: lanoising.py reads these twelve private model-path
         parameters. The model file names below are placeholders; adjust them
         to match the files placed in lanoising/models. -->
    <param name="gpr1_disappear_path" value="$(find lanoising)/models/gpr1_disappear.pkl"/>
    <param name="gpr2_disappear_path" value="$(find lanoising)/models/gpr2_disappear.pkl"/>
    <param name="mdn1_disappear_path" value="$(find lanoising)/models/mdn1_disappear.h5"/>
    <param name="mdn2_disappear_path" value="$(find lanoising)/models/mdn2_disappear.h5"/>
    <param name="gpr1_range_path" value="$(find lanoising)/models/gpr1_range.pkl"/>
    <param name="gpr2_range_path" value="$(find lanoising)/models/gpr2_range.pkl"/>
    <param name="mdn1_range_path" value="$(find lanoising)/models/mdn1_range.h5"/>
    <param name="mdn2_range_path" value="$(find lanoising)/models/mdn2_range.h5"/>
    <param name="gpr1_intensity_path" value="$(find lanoising)/models/gpr1_intensity.pkl"/>
    <param name="gpr2_intensity_path" value="$(find lanoising)/models/gpr2_intensity.pkl"/>
    <param name="mdn1_intensity_path" value="$(find lanoising)/models/mdn1_intensity.h5"/>
    <param name="mdn2_intensity_path" value="$(find lanoising)/models/mdn2_intensity.h5"/>
  </node>
</launch>
--------------------------------------------------------------------------------
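
Note: lanoising.py reads each of these model paths as a private (~) parameter and loads the model at startup, e.g.:

    gpr1_disappear_path = rospy.get_param('~gpr1_disappear_path')
    gpr1_disappear = joblib.load(gpr1_disappear_path)
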
/README.md:
--------------------------------------------------------------------------------
# lanoising (IROS 2020 & T-ITS 2021)

[License: BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)

Noising the point cloud.

Video for IROS 2020: https://youtu.be/fy-E4sJ-7bA

Video for ROSCon 2020: https://vimeo.com/480569545

The package is tested on Ubuntu 16.04 with ROS Kinetic 1.12.14 and Python 3.6.

Requirements:

- numpy 1.17.2
- scikit-learn 0.23.1
- tensorflow 1.14.0
- keras 2.2.4

Anaconda3 is recommended. With Anaconda3, only tensorflow and keras need to be installed.
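
To check that the installed versions match before building, a quick probe from the Anaconda3 python (a minimal sketch, nothing lanoising-specific):

```
import numpy, sklearn, tensorflow, keras

# print the version of each pinned dependency
for m in (numpy, sklearn, tensorflow, keras):
    print(m.__name__, m.__version__)
```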

To make ROS and Anaconda3 compatible, in a new terminal:

`gedit ~/.bashrc`

Add: `source /opt/ros/kinetic/setup.bash`

Delete: `export PATH="/home/tyang/anaconda3/bin:$PATH"`

`source ~/.bashrc`

Before launching the package, export the Anaconda3 path again in the terminal you will launch from:

`export PATH="/home/tyang/anaconda3/bin:$PATH"`

Example:

Download the lanoising package and decompress it into the ./src directory of your catkin workspace (e.g. catkin_ws).

In a new terminal:

```
cd ./catkin_ws

catkin_make
```

Download the models and put all the files in ./catkin_ws/src/lanoising/models:

https://drive.google.com/file/d/1CoVrr3dVQ5DY4WpF7xCM9z6Vx7PYKW1w/view?usp=sharing

or: https://pan.baidu.com/s/1ZFhiuWFYNuSCThR02bLO8A with the code: ptio
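
To confirm the download landed where the package expects it, a small check (a sketch; the workspace path is an assumption, adjust it to yours):

```
import os

# list the model files placed in the package's models directory
models_dir = os.path.expanduser('~/catkin_ws/src/lanoising/models')
print(sorted(os.listdir(models_dir)))
```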

In the terminal:

`roscore`

In a new terminal:

`rviz`

Play the reference rosbag (point clouds recorded by a Velodyne LiDAR under clear weather conditions):

`rosbag play -l --clock 2019-02-19-17-13-37.bag`

In rviz, change the Fixed Frame to "velodyne".

Add the topic "/velodyne_points" in rviz to show the reference data.

Set the visibility in lanoising.py (see the parameter block below).
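
The relevant parameters sit at the top of the `__main__` block in src/lanoising.py; the shipped defaults are:

```
visibility = 50       # set the visibility
sigma = 10            # threshold for good points
intensity = 120       # intensity threshold separating diffuse and retro-reflectors
max_range = 35        # maximal simulation range
disappear_model = 0   # 0 for GPR, 1 for MDN
range_model = 1       # 0 for GPR, 1 for MDN
intensity_model = 1   # 0 for GPR, 1 for MDN
```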

In a new terminal:

```
cd ./catkin_ws

source devel/setup.bash

export PATH="/home/tyang/anaconda3/bin:$PATH"

roslaunch lanoising lanoising.launch
```

Add the topic "/filtered_points" in rviz to show the noised point cloud.
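
To inspect the output without rviz, a minimal listener works too (a sketch; the node name is arbitrary):

```
#! /usr/bin/env python
# count the points in each noised cloud published by lanoising
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2

def cb(msg):
    n = sum(1 for _ in pc2.read_points(msg, skip_nans=True))
    rospy.loginfo('%d noised points', n)

rospy.init_node('lanoising_listener')
rospy.Subscriber('filtered_points', PointCloud2, cb)
rospy.spin()
```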

## Citation

If you publish work based on, or using, this code, we would appreciate citations to the following:

```
@inproceedings{yt20iros,
  author = {Tao Yang and You Li and Yassine Ruichek and Zhi Yan},
  title = {LaNoising: A Data-driven Approach for 903nm ToF LiDAR Performance Modeling under Fog},
  booktitle = {Proceedings of the 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = {October},
  year = {2020},
  pages = {10084-10091},
  doi = {10.1109/IROS45743.2020.9341178}
}

@article{yt21its,
  author = {Tao Yang and You Li and Yassine Ruichek and Zhi Yan},
  journal = {IEEE Transactions on Intelligent Transportation Systems},
  title = {Performance Modeling a Near-Infrared ToF LiDAR Under Fog: A Data-Driven Approach},
  year = {2022},
  volume = {23},
  number = {8},
  pages = {11227-11236},
  doi = {10.1109/TITS.2021.3102138}
}
```
--------------------------------------------------------------------------------
/src/lanoising.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

import rospy
import std_msgs.msg
from sensor_msgs.msg import PointCloud2, PointField
import sensor_msgs.point_cloud2 as pc2
import numpy as np
import joblib

import keras
from keras.layers import Input
from keras.models import Model
from keras.layers.core import Dense, Dropout
from keras.callbacks import Callback
from keras.layers.advanced_activations import ELU
from keras import backend as K
from keras.models import load_model


# functions for MDN
def elu_modif(x, a=1.):
    return ELU(alpha=a)(x) + 1. + 1e-8

def log_sum_exp(x, axis=None):
    """Log-sum-exp trick implementation"""
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.log(K.sum(K.exp(x - x_max),
                       axis=axis, keepdims=True)) + x_max

def mean_log_Gaussian_like_disappear(y_true, parameters):
    """Mean Log Gaussian Likelihood distribution
    Note: The 'c' variable is obtained as global variable
    """
    components = K.reshape(parameters, [-1, c + 2, md])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha, 1e-8, 1.))

    exponent = K.log(alpha) - .5 * float(c) * K.log(2 * np.pi) \
               - float(c) * K.log(sigma) \
               - K.sum((K.expand_dims(y_true, 2) - mu) ** 2, axis=1) / (2 * (sigma) ** 2)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res

def mean_log_Gaussian_like_range(y_true, parameters):
    """Mean Log Gaussian Likelihood distribution
    Note: The 'c' variable is obtained as global variable
    """
    components = K.reshape(parameters, [-1, c + 2, mr])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha, 1e-8, 1.))

    exponent = K.log(alpha) - .5 * float(c) * K.log(2 * np.pi) \
               - float(c) * K.log(sigma) \
               - K.sum((K.expand_dims(y_true, 2) - mu) ** 2, axis=1) / (2 * (sigma) ** 2)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res

def mean_log_Gaussian_like_intensity(y_true, parameters):
    """Mean Log Gaussian Likelihood distribution
    Note: The 'c' variable is obtained as global variable
    """
    components = K.reshape(parameters, [-1, c + 2, mi])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha, 1e-8, 1.))

    exponent = K.log(alpha) - .5 * float(c) * K.log(2 * np.pi) \
               - float(c) * K.log(sigma) \
               - K.sum((K.expand_dims(y_true, 2) - mu) ** 2, axis=1) / (2 * (sigma) ** 2)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res

def LaPlacePDF(x, mu, b):
    return np.exp(-np.abs(x - mu) / b) / (2 * b)

def LaGaussianPDF(x, mu, sigma):
    return np.exp(-np.square(x - mu) / (2 * np.square(sigma))) / (sigma * np.sqrt(2 * np.pi))

def callback(msg):

    pt_x = []
    pt_y = []
    pt_z = []
    pt_d = []
    pt_i = []
    pt_x_new1 = []
    pt_y_new1 = []
    pt_z_new1 = []
    pt_i_new1 = []
    pt_x_new2 = []
    pt_y_new2 = []
    pt_z_new2 = []
    pt_i_new2 = []

    # get all the points
    points = pc2.read_points(msg, field_names=("x", "y", "z", "intensity"), skip_nans=False)
    for point in points:
        pt_x.append(point[0])
        pt_y.append(point[1])
        pt_z.append(point[2])
        pt_d.append(np.sqrt(np.square(point[0]) + np.square(point[1]) + np.square(point[2])))
        pt_i.append(point[3])

    pt_x = np.array(pt_x)
    pt_y = np.array(pt_y)
    pt_z = np.array(pt_z)
    pt_d = np.array(pt_d)
    pt_i = np.array(pt_i)
    print('maximal intensity: ', np.max(pt_i))
    print('median intensity: ', np.median(pt_i))

    # only deal with points in max_range
    index_outrange = np.where(pt_d > max_range)
    # pt_x_new0 = pt_x[index_outrange[0]]
    # pt_y_new0 = pt_y[index_outrange[0]]
    # pt_z_new0 = pt_z[index_outrange[0]]
    # pt_i_new0 = pt_i[index_outrange[0]]
    pt_x = np.delete(pt_x, index_outrange[0])
    pt_y = np.delete(pt_y, index_outrange[0])
    pt_z = np.delete(pt_z, index_outrange[0])
    pt_d = np.delete(pt_d, index_outrange[0])
    pt_i = np.delete(pt_i, index_outrange[0])

    # process the points for diffuse reflectors
    index = np.where(pt_i <= intensity)
    if np.size(index[0]) > 0:

        pt_x_new = pt_x[index[0]]
        pt_y_new = pt_y[index[0]]
        pt_z_new = pt_z[index[0]]
        pt_d_new = pt_d[index[0]]
        pt_i_new = pt_i[index[0]]

        # disappear visibility prediction
        x_pred = np.transpose(np.vstack((pt_d_new, pt_i_new)))
        if disappear_model == 0:
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            y_pred, y_sigm = gpr1_disappear.predict(x_pred, return_std=True)
        else:
            x_pred[:, 0] = x_pred[:, 0] / 30.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            parameters = mdn1_disappear.predict(x_pred)
            comp = np.reshape(parameters, [-1, c + 2, md])
            mu_pred = comp[:, :c, :]
            sigma_pred = comp[:, c, :]
            alpha_pred = comp[:, c + 1, :]
            y_pred = np.zeros(len(mu_pred))
            y_sigm = np.zeros(len(mu_pred))
            for mx in range(mu_pred.shape[-1]):
                y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
            y_sigm = np.sqrt(y_sigm)
        vdis = np.random.normal(y_pred, y_sigm)  # sampled visibility at which each point disappears

        # range prediction
        x_pred = np.vstack((np.ones(np.size(pt_i_new)) * visibility, pt_i_new, pt_d_new))
        x_pred = np.atleast_2d(x_pred).T
        x_pred[:, 0] = x_pred[:, 0] / 200.0
        x_pred[:, 1] = x_pred[:, 1] / 100.0
        x_pred[:, 2] = x_pred[:, 2] / 30.0
        if range_model == 0:
            y_pred, y_sigm = gpr1_range.predict(x_pred, return_std=True)
        else:
            parameters = mdn1_range.predict(x_pred)
            comp = np.reshape(parameters, [-1, c + 2, mr])
            mu_pred = comp[:, :c, :]
            sigma_pred = comp[:, c, :]
            alpha_pred = comp[:, c + 1, :]
            y_pred = np.zeros(len(mu_pred))
            y_sigm = np.zeros(len(mu_pred))
            for mx in range(mu_pred.shape[-1]):
                y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
            y_sigm = np.sqrt(y_sigm)

        # sampling
        pt_d_new1_c = np.random.normal(y_pred, y_sigm)
        ratio1 = pt_d_new1_c / pt_d_new
        index_neg = np.where(ratio1 < 0)
        ratio1[index_neg[0]] = 0
        index_big = np.where(ratio1 > 1)
        ratio1[index_big[0]] = 1
        pt_x_new1_c = pt_x_new * ratio1
        pt_y_new1_c = pt_y_new * ratio1
        pt_z_new1_c = pt_z_new * ratio1
        errors = abs(pt_d_new1_c - pt_d_new)
        index_vis = np.where((vdis <= visibility) & (errors <= sigma))  # points that survive with an acceptable range error

        # get good points
        if np.size(index_vis[0]) > 0:
            pt_x_new1_a = pt_x_new[index_vis[0]]
            pt_y_new1_a = pt_y_new[index_vis[0]]
            pt_z_new1_a = pt_z_new[index_vis[0]]
            pt_d_new1_a = pt_d_new[index_vis[0]]
            pt_i_new1_a = pt_i_new[index_vis[0]]

            x_pred = np.vstack((np.ones(np.size(pt_i_new1_a)) * visibility, pt_i_new1_a, pt_d_new1_a))
            x_pred = np.atleast_2d(x_pred).T
            x_pred[:, 0] = x_pred[:, 0] / 200.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            x_pred[:, 2] = x_pred[:, 2] / 30.0

            # intensity estimation
            if intensity_model == 0:
                y_pred, y_sigm = gpr1_intensity.predict(x_pred, return_std=True)
            else:
                parameters = mdn1_intensity.predict(x_pred)
                comp = np.reshape(parameters, [-1, c + 2, mi])
                mu_pred = comp[:, :c, :]
                sigma_pred = comp[:, c, :]
                alpha_pred = comp[:, c + 1, :]
                y_pred = np.zeros(len(mu_pred))
                y_sigm = np.zeros(len(mu_pred))
                for mx in range(mu_pred.shape[-1]):
                    y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                    y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
                y_sigm = np.sqrt(y_sigm)

            pt_i_new1_tmp = np.random.normal(y_pred, y_sigm)
            index_neg = np.where(pt_i_new1_tmp < 0)
            pt_i_new1_tmp[index_neg[0]] = 0
            index_big = np.where(pt_i_new1_tmp > pt_i_new1_a)
            pt_i_new1_tmp[index_big[0]] = pt_i_new1_a[index_big[0]]
            pt_i_new1_a = pt_i_new1_tmp

        # get noisy points
        if (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_d_new1_b = np.delete(pt_d_new, index_vis[0])
            pt_i_new1_b = np.delete(pt_i_new, index_vis[0])
            x_pred = np.vstack((np.ones(np.size(pt_i_new1_b)) * visibility, pt_i_new1_b, pt_d_new1_b))
            x_pred = np.atleast_2d(x_pred).T
            x_pred[:, 0] = x_pred[:, 0] / 200.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            x_pred[:, 2] = x_pred[:, 2] / 30.0

            pt_x_new1_c = np.delete(pt_x_new1_c, index_vis[0])
            pt_y_new1_c = np.delete(pt_y_new1_c, index_vis[0])
            pt_z_new1_c = np.delete(pt_z_new1_c, index_vis[0])

            # intensity estimation
            if intensity_model == 0:
                y_pred, y_sigm = gpr1_intensity.predict(x_pred, return_std=True)
            else:
                parameters = mdn1_intensity.predict(x_pred)
                comp = np.reshape(parameters, [-1, c + 2, mi])
                mu_pred = comp[:, :c, :]
                sigma_pred = comp[:, c, :]
                alpha_pred = comp[:, c + 1, :]
                y_pred = np.zeros(len(mu_pred))
                y_sigm = np.zeros(len(mu_pred))
                for mx in range(mu_pred.shape[-1]):
                    y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                    y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
                y_sigm = np.sqrt(y_sigm)

            pt_i_new1_tmp = np.random.normal(y_pred, y_sigm)
            index_neg = np.where(pt_i_new1_tmp < 0)
            pt_i_new1_tmp[index_neg[0]] = 0
            index_big = np.where(pt_i_new1_tmp > pt_i_new1_b)
            pt_i_new1_tmp[index_big[0]] = pt_i_new1_b[index_big[0]]
            pt_i_new1_c = pt_i_new1_tmp

        # put the good points and noisy points together
        if np.size(index_vis[0]) > 0 and (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_x_new1 = np.hstack((pt_x_new1_a, pt_x_new1_c))
            pt_y_new1 = np.hstack((pt_y_new1_a, pt_y_new1_c))
            pt_z_new1 = np.hstack((pt_z_new1_a, pt_z_new1_c))
            pt_i_new1 = np.hstack((pt_i_new1_a, pt_i_new1_c))
        if np.size(index_vis[0]) > 0 and (np.size(pt_x_new) - np.size(index_vis[0])) == 0:
            pt_x_new1 = pt_x_new1_a
            pt_y_new1 = pt_y_new1_a
            pt_z_new1 = pt_z_new1_a
            pt_i_new1 = pt_i_new1_a
        if np.size(index_vis[0]) == 0 and (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_x_new1 = pt_x_new1_c
            pt_y_new1 = pt_y_new1_c
            pt_z_new1 = pt_z_new1_c
            pt_i_new1 = pt_i_new1_c

    # process the points for retro-reflectors
    index = np.where(pt_i > intensity)
    if np.size(index[0]) > 0:
        pt_x_new = pt_x[index[0]]
        pt_y_new = pt_y[index[0]]
        pt_z_new = pt_z[index[0]]
        pt_d_new = pt_d[index[0]]
        pt_i_new = pt_i[index[0]]

        # disappear visibility prediction
        x_pred = np.transpose(np.vstack((pt_d_new, pt_i_new)))
        if disappear_model == 0:
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            y_pred, y_sigm = gpr2_disappear.predict(x_pred, return_std=True)
        else:
            x_pred[:, 0] = x_pred[:, 0] / 30.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            parameters = mdn2_disappear.predict(x_pred)
            comp = np.reshape(parameters, [-1, c + 2, md])
            mu_pred = comp[:, :c, :]
            sigma_pred = comp[:, c, :]
            alpha_pred = comp[:, c + 1, :]
            y_pred = np.zeros(len(mu_pred))
            y_sigm = np.zeros(len(mu_pred))
            for mx in range(mu_pred.shape[-1]):
                y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
            y_sigm = np.sqrt(y_sigm)
        vdis = np.random.normal(y_pred, y_sigm)  # sampled visibility at which each point disappears

        # range prediction
        x_pred = np.vstack((np.ones(np.size(pt_i_new)) * visibility, pt_i_new, pt_d_new))
        x_pred = np.atleast_2d(x_pred).T
        x_pred[:, 0] = x_pred[:, 0] / 200.0
        x_pred[:, 1] = x_pred[:, 1] / 100.0
        x_pred[:, 2] = x_pred[:, 2] / 30.0
        if range_model == 0:
            y_pred, y_sigm = gpr2_range.predict(x_pred, return_std=True)
        else:
            parameters = mdn2_range.predict(x_pred)
            comp = np.reshape(parameters, [-1, c + 2, mr])
            mu_pred = comp[:, :c, :]
            sigma_pred = comp[:, c, :]
            alpha_pred = comp[:, c + 1, :]
            y_pred = np.zeros(len(mu_pred))
            y_sigm = np.zeros(len(mu_pred))
            for mx in range(mu_pred.shape[-1]):
                y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
            y_sigm = np.sqrt(y_sigm)

        # sampling
        pt_d_new2_c = np.random.normal(y_pred, y_sigm)
        ratio2 = pt_d_new2_c / pt_d_new
        index_neg = np.where(ratio2 < 0)
        ratio2[index_neg[0]] = 0
        index_big = np.where(ratio2 > 1)
        ratio2[index_big[0]] = 1
        pt_x_new2_c = pt_x_new * ratio2
        pt_y_new2_c = pt_y_new * ratio2
        pt_z_new2_c = pt_z_new * ratio2
        errors = abs(pt_d_new2_c - pt_d_new)
        index_vis = np.where((vdis <= visibility) & (errors <= sigma))  # points that survive with an acceptable range error

        # get good points
        if np.size(index_vis[0]) > 0:
            pt_x_new2_a = pt_x_new[index_vis[0]]
            pt_y_new2_a = pt_y_new[index_vis[0]]
            pt_z_new2_a = pt_z_new[index_vis[0]]
            pt_d_new2_a = pt_d_new[index_vis[0]]
            pt_i_new2_a = pt_i_new[index_vis[0]]

            x_pred = np.vstack((np.ones(np.size(pt_i_new2_a)) * visibility, pt_i_new2_a, pt_d_new2_a))
            x_pred = np.atleast_2d(x_pred).T
            x_pred[:, 0] = x_pred[:, 0] / 200.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            x_pred[:, 2] = x_pred[:, 2] / 30.0

            # intensity estimation
            if intensity_model == 0:
                y_pred, y_sigm = gpr2_intensity.predict(x_pred, return_std=True)
            else:
                parameters = mdn2_intensity.predict(x_pred)
                comp = np.reshape(parameters, [-1, c + 2, mi])
                mu_pred = comp[:, :c, :]
                sigma_pred = comp[:, c, :]
                alpha_pred = comp[:, c + 1, :]
                y_pred = np.zeros(len(mu_pred))
                y_sigm = np.zeros(len(mu_pred))
                for mx in range(mu_pred.shape[-1]):
                    y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                    y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
                y_sigm = np.sqrt(y_sigm)

            pt_i_new2_tmp = np.random.normal(y_pred, y_sigm)
            index_neg = np.where(pt_i_new2_tmp < 0)
            pt_i_new2_tmp[index_neg[0]] = 0
            index_big = np.where(pt_i_new2_tmp > pt_i_new2_a)
            pt_i_new2_tmp[index_big[0]] = pt_i_new2_a[index_big[0]]
            pt_i_new2_a = pt_i_new2_tmp

        # get noisy points
        if (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_d_new2_b = np.delete(pt_d_new, index_vis[0])
            pt_i_new2_b = np.delete(pt_i_new, index_vis[0])
            x_pred = np.vstack((np.ones(np.size(pt_i_new2_b)) * visibility, pt_i_new2_b, pt_d_new2_b))
            x_pred = np.atleast_2d(x_pred).T
            x_pred[:, 0] = x_pred[:, 0] / 200.0
            x_pred[:, 1] = x_pred[:, 1] / 100.0
            x_pred[:, 2] = x_pred[:, 2] / 30.0

            pt_x_new2_c = np.delete(pt_x_new2_c, index_vis[0])
            pt_y_new2_c = np.delete(pt_y_new2_c, index_vis[0])
            pt_z_new2_c = np.delete(pt_z_new2_c, index_vis[0])

            # intensity estimation
            if intensity_model == 0:
                y_pred, y_sigm = gpr2_intensity.predict(x_pred, return_std=True)
            else:
                parameters = mdn2_intensity.predict(x_pred)
                comp = np.reshape(parameters, [-1, c + 2, mi])
                mu_pred = comp[:, :c, :]
                sigma_pred = comp[:, c, :]
                alpha_pred = comp[:, c + 1, :]
                y_pred = np.zeros(len(mu_pred))
                y_sigm = np.zeros(len(mu_pred))
                for mx in range(mu_pred.shape[-1]):
                    y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
                    y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
                y_sigm = np.sqrt(y_sigm)

            pt_i_new2_tmp = np.random.normal(y_pred, y_sigm)
            index_neg = np.where(pt_i_new2_tmp < 0)
            pt_i_new2_tmp[index_neg[0]] = 0
            index_big = np.where(pt_i_new2_tmp > pt_i_new2_b)
            pt_i_new2_tmp[index_big[0]] = pt_i_new2_b[index_big[0]]
            pt_i_new2_c = pt_i_new2_tmp

        # put the good points and noisy points together
        if np.size(index_vis[0]) > 0 and (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_x_new2 = np.hstack((pt_x_new2_a, pt_x_new2_c))
            pt_y_new2 = np.hstack((pt_y_new2_a, pt_y_new2_c))
            pt_z_new2 = np.hstack((pt_z_new2_a, pt_z_new2_c))
            pt_i_new2 = np.hstack((pt_i_new2_a, pt_i_new2_c))
        if np.size(index_vis[0]) > 0 and (np.size(pt_x_new) - np.size(index_vis[0])) == 0:
            pt_x_new2 = pt_x_new2_a
            pt_y_new2 = pt_y_new2_a
            pt_z_new2 = pt_z_new2_a
            pt_i_new2 = pt_i_new2_a
        if np.size(index_vis[0]) == 0 and (np.size(pt_x_new) - np.size(index_vis[0])) > 0:
            pt_x_new2 = pt_x_new2_c
            pt_y_new2 = pt_y_new2_c
            pt_z_new2 = pt_z_new2_c
            pt_i_new2 = pt_i_new2_c

    # put all the points together
    cloud_points = np.zeros((0, 4), dtype=np.float32)  # fallback: publish an empty cloud when nothing survives
    if np.size(pt_x_new1) > 0:
        cloud_points1 = np.transpose(np.vstack((pt_x_new1, pt_y_new1, pt_z_new1, pt_i_new1)))
    if np.size(pt_x_new2) > 0:
        cloud_points2 = np.transpose(np.vstack((pt_x_new2, pt_y_new2, pt_z_new2, pt_i_new2)))

    if np.size(pt_x_new1) > 0 and np.size(pt_x_new2) > 0:
        cloud_points = np.vstack((cloud_points1, cloud_points2))
    if np.size(pt_x_new1) > 0 and np.size(pt_x_new2) == 0:
        cloud_points = cloud_points1
    if np.size(pt_x_new1) == 0 and np.size(pt_x_new2) > 0:
        cloud_points = cloud_points2

    # if np.size(pt_x_new0) > 0:
    #     cloud_points0 = np.transpose(np.vstack((pt_x_new0, pt_y_new0, pt_z_new0)))
    #     cloud_points = np.vstack((cloud_points, cloud_points0))

    fields = [PointField('x', 0, PointField.FLOAT32, 1),
              PointField('y', 4, PointField.FLOAT32, 1),
              PointField('z', 8, PointField.FLOAT32, 1),
              PointField('intensity', 12, PointField.FLOAT32, 1),
              # PointField('rgba', 12, PointField.UINT32, 1),
              ]

    # header
    header = std_msgs.msg.Header()
    header.stamp = rospy.Time.now()
    header.frame_id = 'velodyne'
    # create the output cloud from the noised points
    new_points = pc2.create_cloud(header, fields, cloud_points)
    pub.publish(new_points)
    # rospy.loginfo(points)

if __name__ == '__main__':

    visibility = 50  # set the visibility
    sigma = 10  # set the threshold for good points
    intensity = 120  # set the intensity threshold to distinguish diffuse reflectors and retro-reflectors
    max_range = 35  # maximal simulation range
    disappear_model = 0  # choose the model for disappear visibility, set 0 for GPR and set 1 for MDN
    range_model = 1  # choose the model for ranging estimation, set 0 for GPR and set 1 for MDN
    intensity_model = 1  # choose the model for intensity estimation, set 0 for GPR and set 1 for MDN
    c = 1  # parameters for MDN, don't change
    md = 1  # parameters for MDN, don't change
    mr = 1  # parameters for MDN, don't change
    mi = 1  # parameters for MDN, don't change

    np.random.seed(1)
    rospy.init_node('lanoising', anonymous=True)

    gpr1_disappear_path = rospy.get_param('~gpr1_disappear_path')
    gpr2_disappear_path = rospy.get_param('~gpr2_disappear_path')
    gpr1_disappear = joblib.load(gpr1_disappear_path)
    gpr2_disappear = joblib.load(gpr2_disappear_path)
    mdn1_disappear_path = rospy.get_param('~mdn1_disappear_path')
    mdn2_disappear_path = rospy.get_param('~mdn2_disappear_path')
    mdn1_disappear = load_model(mdn1_disappear_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_disappear})
    mdn2_disappear = load_model(mdn2_disappear_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_disappear})

    gpr1_range_path = rospy.get_param('~gpr1_range_path')
    gpr2_range_path = rospy.get_param('~gpr2_range_path')
    gpr1_range = joblib.load(gpr1_range_path)
    gpr2_range = joblib.load(gpr2_range_path)
    mdn1_range_path = rospy.get_param('~mdn1_range_path')
    mdn2_range_path = rospy.get_param('~mdn2_range_path')
    mdn1_range = load_model(mdn1_range_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_range})
    mdn2_range = load_model(mdn2_range_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_range})

    gpr1_intensity_path = rospy.get_param('~gpr1_intensity_path')
    gpr2_intensity_path = rospy.get_param('~gpr2_intensity_path')
    gpr1_intensity = joblib.load(gpr1_intensity_path)
    gpr2_intensity = joblib.load(gpr2_intensity_path)
    mdn1_intensity_path = rospy.get_param('~mdn1_intensity_path')
    mdn2_intensity_path = rospy.get_param('~mdn2_intensity_path')
    mdn1_intensity = load_model(mdn1_intensity_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_intensity})
    mdn2_intensity = load_model(mdn2_intensity_path, custom_objects={'elu_modif': elu_modif, 'log_sum_exp': log_sum_exp, 'mean_log_Gaussian_like': mean_log_Gaussian_like_intensity})

    # build the predict functions up front so the models are safe to call
    # from the subscriber callback thread
    mdn1_disappear._make_predict_function()
    mdn2_disappear._make_predict_function()
    mdn1_range._make_predict_function()
    mdn2_range._make_predict_function()
    mdn1_intensity._make_predict_function()
    mdn2_intensity._make_predict_function()

    sub = rospy.Subscriber('velodyne_points', PointCloud2, callback)
    pub = rospy.Publisher('filtered_points', PointCloud2, queue_size=10)

    rospy.spin()
--------------------------------------------------------------------------------
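
A note on the MDN post-processing that repeats throughout callback(): the network emits, per point, c means plus one sigma and one mixing weight per mixture component; the node takes the alpha-weighted mean as the point estimate and the alpha-weighted sigma as the spread, then draws one Gaussian sample per point. A standalone sketch with toy numbers (c = 1 and one component, as in the node):

    import numpy as np

    c, m = 1, 1  # one output dimension, one mixture component
    parameters = np.array([[12.5, 0.8, 1.0]])  # toy network output: [mu, sigma, alpha]
    comp = np.reshape(parameters, [-1, c + 2, m])
    mu_pred = comp[:, :c, :]
    sigma_pred = comp[:, c, :]
    alpha_pred = comp[:, c + 1, :]
    y_pred = np.zeros(len(mu_pred))
    y_sigm = np.zeros(len(mu_pred))
    for mx in range(mu_pred.shape[-1]):
        y_pred[:] += alpha_pred[:, mx] * mu_pred[:, 0, mx]
        y_sigm[:] += np.square(alpha_pred[:, mx] * sigma_pred[:, mx])
    y_sigm = np.sqrt(y_sigm)
    print(y_pred, y_sigm, np.random.normal(y_pred, y_sigm))  # one draw per point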