├── _config.yml
├── calib_gui.py
├── camera_capture.py
├── collisionNew.py
├── docs
│   ├── .DS_Store
│   ├── README.md
│   ├── images
│   │   ├── .DS_Store
│   │   ├── 2dto3d.png
│   │   ├── DSC00340.JPG
│   │   ├── DSC00341.JPG
│   │   ├── DSC00342.JPG
│   │   ├── block.png
│   │   ├── lctoverlay.png
│   │   ├── pcl1.png
│   │   ├── pcl2.png
│   │   ├── pointcloud8.png
│   │   ├── puck.png
│   │   ├── transform.png
│   │   └── yolo.png
│   └── index.html
├── lidar_projection.py
├── main.py
├── orig_main.py
├── server.c
├── transpose.py
├── utils_data.py
├── velodyne_capture_v2.py
├── velodyne_capture_v3.py
├── vlp16.py
└── yolo.py
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
--------------------------------------------------------------------------------
/calib_gui.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import sys
6 | import os
7 | import argparse
8 | import numpy as np
9 | import matplotlib
10 | matplotlib.use("TkAgg")
11 | import matplotlib.pyplot as plt
12 | from skimage.io import imread
13 | from utils_data import *
14 | from utils_data import CalibFields
15 | import tkinter as tk
16 | import functools
17 | import pandas as pd
18 |
19 | class CalibParam:
20 | def __init__(self,dict_calib,R_0=None):
21 | if R_0 is None:
22 | self.R_0 = np.array([[0., 0., -1.],
23 | [0., -1., 0.],
24 | [1., 0., 0.]])
25 | else:
26 | self.R_0 = R_0
27 |
28 | self.dict_calib = dict_calib
29 |
30 | # Extrinsic Rotation matrix before changing Coordinate System.
31 | # (Initial value before applying rotation inputs)
32 | self.R_l = np.dot(self.R_0.T,
33 | self.dict_calib[CalibFields.extrinsic][:3,:3])
34 |
35 | # Translation vector
36 | self.v_t = np.copy(self.dict_calib[CalibFields.extrinsic][:3,3])
37 |
38 | def set_Rmat(self,R_in=np.eye(3),v_t=np.zeros(3)):
39 | self.dict_calib[CalibFields.extrinsic][:3,3] = self.v_t + v_t
40 | self.dict_calib[CalibFields.extrinsic][:3,:3] = np.dot(self.R_0,
41 | np.dot(R_in,self.R_l))
42 |
43 | def save_Rmat(self,dpath):
44 | f_int = os.path.join(dpath,'calib_intrinsic.txt')
45 | f_ext = os.path.join(dpath,'calib_extrinsic.txt')
46 | np.savetxt(f_int,
47 | self.dict_calib[CalibFields.intrinsic],
48 | fmt='%.3f',
49 | delimiter=' ')
50 | np.savetxt(f_ext,
51 | self.dict_calib[CalibFields.extrinsic],
52 | fmt='%.3f',
53 | delimiter=' ')
54 |
55 | class CalibGUI(tk.Tk):
56 | def __init__(self,im,points,cparam,outdir,_D_MAX=75.0,_D_MIN=2.0):
57 | tk.Tk.__init__(self)
58 | self.title('Lidar-Cam calibrator')
59 | self.im = im
60 | self.points = points
61 | self.im_height,self.im_width = np.shape(im)[:2]
62 | self.cparam = cparam
63 | self.outdir = outdir
64 | self.d_max = _D_MAX
65 | self.d_min = _D_MIN
66 |
67 | # Define Variables in GUI
68 | self.var_xr = tk.DoubleVar()
69 | self.var_yr = tk.DoubleVar()
70 | self.var_zr = tk.DoubleVar()
71 | self.var_xt = tk.DoubleVar()
72 | self.var_yt = tk.DoubleVar()
73 | self.var_zt = tk.DoubleVar()
74 |
75 | # Define Widgets GUI
76 | self.label_rot = tk.Label(self, text="\nRotations (degree)")
77 | self.scale_xr = tk.Scale(self, variable=self.var_xr,
78 | from_=-5, to=5,
79 | resolution=0.01,
80 | length=300)
81 | self.label_xr = tk.Label(self, text="x (forward)")
82 | self.scale_yr = tk.Scale(self, variable=self.var_yr,
83 | from_=-5, to=5,
84 | resolution=0.01,
85 | length=300)
86 | self.label_yr = tk.Label(self, text="y (left)")
87 | self.scale_zr = tk.Scale(self, variable=self.var_zr,
88 | from_=-5, to=5,
89 | resolution=0.01,
90 | length=300)
91 | self.label_zr = tk.Label(self, text="z (up)")
92 | self.label_trans = tk.Label(self, text="\nTranslation (m)")
93 | self.scale_xt = tk.Scale(self, variable=self.var_xt,
94 | from_=-2, to=2,
95 | resolution=0.01,
96 | length=300)
97 | self.label_xt = tk.Label(self, text="x (down)")
98 | self.scale_yt = tk.Scale(self, variable=self.var_yt,
99 | from_=-2, to=2,
100 | resolution=0.01,
101 | length=300)
102 | self.label_yt = tk.Label(self, text="y (right)")
103 | self.scale_zt = tk.Scale(self, variable=self.var_zt,
104 | from_=-2, to=2,
105 | resolution=0.01,
106 | length=300)
107 | self.label_zt = tk.Label(self, text="z (forward)")
108 | self.button_plot = tk.Button(self, text="Update and Plot Result",
109 | command=self.imlidar_plot)
110 | self.button_save = tk.Button(self, text="Save Result",
111 | command=self.matsave)
112 |
113 | # Locate widgets
114 | self.label_rot.grid(row=0,columnspan=6)
115 | self.scale_xr.grid(row=1,column=0)
116 | self.label_xr.grid(row=1,column=1)
117 | self.scale_yr.grid(row=1,column=2)
118 | self.label_yr.grid(row=1,column=3)
119 | self.scale_zr.grid(row=1,column=4)
120 | self.label_zr.grid(row=1,column=5)
121 | self.label_trans.grid(row=2,columnspan=6)
122 | self.scale_xt.grid(row=3,column=0)
123 | self.label_xt.grid(row=3,column=1)
124 | self.scale_yt.grid(row=3,column=2)
125 | self.label_yt.grid(row=3,column=3)
126 | self.scale_zt.grid(row=3,column=4)
127 | self.label_zt.grid(row=3,column=5)
128 | self.button_plot.grid(row=4,columnspan=6)
129 | self.button_save.grid(row=5,columnspan=6)
130 |
131 | plt.imshow(self.im)
132 | plt.show()
133 |
134 | def imlidar_plot(self,cmap='brg'):
135 | # Rotation matrix
136 | r_x = self.var_xr.get()*np.pi/180.0
137 | r_y = self.var_yr.get()*np.pi/180.0
138 | r_z = self.var_zr.get()*np.pi/180.0
139 |
140 | R_x = np.eye(3)
141 | R_x[1,1] = np.cos(r_x)
142 | R_x[2,2] = np.cos(r_x)
143 | R_x[1,2] = -np.sin(r_x)
144 | R_x[2,1] = np.sin(r_x)
145 |
146 | R_y = np.eye(3)
147 | R_y[0,0] = np.cos(r_y)
148 | R_y[2,2] = np.cos(r_y)
149 | R_y[0,2] = np.sin(r_y)
150 | R_y[2,0] = -np.sin(r_y)
151 |
152 | R_z = np.eye(3)
153 | R_z[0,0] = np.cos(r_z)
154 | R_z[1,1] = np.cos(r_z)
155 | R_z[0,1] = -np.sin(r_z)
156 | R_z[1,0] = np.sin(r_z)
157 |
158 | # Translation vector
159 | vt_add = np.zeros(3)
160 | vt_add[0] = self.var_xt.get()
161 | vt_add[1] = self.var_yt.get()
162 | vt_add[2] = self.var_zt.get()
163 |
164 | R_in = functools.reduce(np.dot,[R_x,R_y,R_z])
165 |
166 | self.cparam.set_Rmat(R_in,vt_add)
167 |
168 | points2D, pointsDist = project_lidar_to_img(self.cparam.dict_calib,
169 | self.points,
170 | self.im_height,
171 | self.im_width)
172 | # Clip distance with min/max values
173 | for i_pt,pdist in enumerate(pointsDist):
174 | pointsDist[i_pt] = self.d_max if pdist>self.d_max \
175 | else pdist if pdist>self.d_min \
176 | else self.d_min
177 | # Rescale to 1~255 with 1/d value
178 | pointsDist = np.round(minmax_scale(1.0/pointsDist,
179 | 1.0/self.d_max,1.0/self.d_min,
180 | 1,255)).astype('uint8')
181 | # Color points w.r.t inversed distance values
182 | _CMAP = plt.get_cmap(cmap)
183 | pointsColor = np.array([_CMAP(1.0-pdist/255.0)[:3] \
184 | for pdist in pointsDist])
185 | plt.clf()
186 | plt.imshow(self.im)
187 | plt.scatter(points2D[:,1],points2D[:,0],
188 | c=pointsColor,
189 | s=1)
190 | plt.xlim(0,self.im_width-1)
191 | plt.ylim(self.im_height-1,0)
192 | plt.draw()
193 |
194 | def matsave(self):
195 | self.cparam.save_Rmat(self.outdir)
196 |
197 |
198 | def main(args):
199 | dict_calib = loadCalib(args.f_int,args.f_ext,ltype=args.ltype)
200 |
201 | cparam = CalibParam(dict_calib)
202 | im = imread(args.f_img)
203 | # im_height,im_width = np.shape(im)[:2]
204 | points = None
205 | if args.f_lidar.lower().endswith('.csv'):
206 | df = pd.read_csv(args.f_lidar)
207 | points = df.values[:,:3]  # DataFrame.as_matrix() is deprecated in newer pandas
208 | else:
209 | points = np.fromfile(args.f_lidar).reshape(-1,3)
210 |
211 | calib_app = CalibGUI(im,points,cparam,args.outdir)
212 | calib_app.mainloop()
213 |
214 | def parse_args():
215 | parser = argparse.ArgumentParser(description=
216 | 'GUI LIDAR-CAM calibration ')
217 | parser.add_argument('-image', dest='f_img',
218 | help='File path to image',
219 | default=None, type=str)
220 | parser.add_argument('-lidar', dest='f_lidar',
221 | help='File path to lidar',
222 | default=None, type=str)
223 | parser.add_argument('-outdir', dest='outdir',
224 | help='Path to the output file.(excluding file name)',
225 | default='', type=str)
226 | parser.add_argument('-ext_init', dest='f_ext',
227 | help='Path to the initial extrinsic matrix file.',
228 | default = None, type=str)
229 | parser.add_argument('-int_init', dest='f_int',
230 | help='Path to the initial intrinsic matrix file.',
231 | default = None, type=str)
232 | parser.add_argument('-ltype', dest='ltype',
233 | help='Type of Lidar (velo, m8)',
234 | default = 'm8', type=str)
235 | args = parser.parse_args()
236 | return args
237 |
238 | if __name__ == "__main__":
239 | args = parse_args()
240 | print("Called with args:")
241 | print(args)
242 | sys.exit(main(args))
243 |
--------------------------------------------------------------------------------
/camera_capture.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | # Camera 0 is the integrated web cam on my netbook; the external camera is port 1
4 | camera_port = 1
5 |
6 | # Number of frames to throw away while the camera adjusts to light levels
7 | ramp_frames = 30
8 |
9 | # Captures a single image from the camera and returns it as a numpy array (BGR).
10 | # cv2.VideoCapture only needs the index of a camera port.
11 | def get_image():
12 |     camera = cv2.VideoCapture(camera_port)
13 |     # read() is the easiest way to get a full image out of a VideoCapture object.
14 |     retval, im = camera.read()
15 |     # Release the camera so the device can be reopened on the next call
16 |     # (del alone does not free the capture device)
17 |     camera.release()
18 |     return im
19 |
20 | if __name__ == '__main__':
21 |     # Ramp the camera - these frames will be discarded and are only used to allow
22 |     # v4l2 to adjust light levels, if necessary
23 |     for i in range(ramp_frames):
24 |         temp = get_image()
25 |     print("Taking image...")
26 |     # Take the actual image we want to keep
27 |     camera_capture = get_image()
28 |     file = "test_image.png"
29 |     # A nice feature of imwrite is that it will automatically choose the
30 |     # correct format based on the file extension you provide. Convenient!
31 |     cv2.imwrite(file, camera_capture)
--------------------------------------------------------------------------------
/collisionNew.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Mon Feb 19 12:07:05 2018
5 |
6 | @author:
7 | """
8 |
9 | import shapely.geometry
10 | import shapely.affinity
11 | import matplotlib.pyplot as plt
12 | from descartes import PolygonPatch
13 | import math, random
14 | # import winsound
15 | import numpy as np
16 | import os
17 |
18 | delta_t = 3.0
19 |
20 |
21 |
22 | #initialize GPS capture
23 | """
24 | import socket
25 | import pandas as pd
26 | HOST = "192.168.1.201"
27 | PORT = 8308
28 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
29 | soc.bind(('', PORT))
30 | data = soc.recv(554)
31 | data_string = data[206:270].decode('utf-8')
32 | data_list = data_string.split(",")
33 | """
34 |
35 |
36 | #Initialize Kalman stuff
37 | delta_t = 1  # overrides the delta_t = 3.0 set above; used by the Kalman matrices and the path prediction
38 | F_ = np.array([[1, 0, delta_t, 0],[0, 1, 0, delta_t],[0, 0, 1, 0], [0, 0, 0, 1]])
39 | R_laser = np.eye(4)*0.0225
40 | H_laser = np.array([[1,0,0,0],[0,1,0,0]])
41 | #P_ = np.array([[1,0,0,0],[0,1,0,0],[0,0,1000,0],[0,0,0,1000]])
42 | Q_ = np.array([[0.25*R_laser[0,0]*delta_t**4, 0, 0.5*R_laser[0,0]*delta_t**3,0],
43 | [0,0.25*R_laser[1,1]*delta_t**4 ,0,0.5*R_laser[0,0]*delta_t**3],
44 | [0.5*R_laser[0,0]*delta_t**3,0,R_laser[0,0]*delta_t**2,0],
45 | [0, 0.5*R_laser[0,0]*delta_t**3, 0,R_laser[1,1]*delta_t**2 ]])
46 |
47 |
48 | #convert knots to meters per second
49 | def kn_to_ms(kn):
50 | return kn*0.514444
51 |
52 | #gets data from object tracking module (evan's thing)
53 | def object_tracking(obj):
54 | obj.calc()
55 | return obj.x_
56 |
57 | def update_F_and_Q(delta_t):
58 |     F_[0,2] = delta_t
59 |     F_[1,3] = delta_t  # both dt slots of the state-transition matrix
60 |
61 | #represents a 2D rotated rectangle, angle is from 0 to 360
62 | class Rectangle:
63 | def __init__(self, center_x, center_y, width, length, angle):
64 | self.cx = center_x
65 | self.cy = center_y
66 | self.l = length
67 | self.w = width
68 | self.angle = angle #0 angle is front to the top
69 | def get_contour(self):
70 | w = self.w
71 | l = self.l
72 | c = shapely.geometry.box(-w/2.0, -l/2.0, w/2.0, l/2.0)
73 | rc = shapely.affinity.rotate(c, self.angle)
74 | return shapely.affinity.translate(rc, self.cx, self.cy)
75 | #returns intersect area between two rectangles
76 | def intersection(self, other):
77 | return self.get_contour().intersection(other.get_contour())
78 | #returns true if collision
79 | def collision(self, other):
80 | overlap = self.intersection(other)
81 | if (overlap.area > 0):
82 | return True
83 | else:
84 | return False
85 |
86 | #extends rectangle
87 | class Car(Rectangle):
88 | def __init__(self, center_x, center_y, width, length, angle):
89 | self.cx = center_x
90 | self.cy = center_y
91 | self.l = length
92 | self.w = width
93 | self.angle = angle
94 | #new variables
95 | self.cx_future = self.cx
96 | self.cy_future = self.cy
97 | self.speed = 0.0 #meters/second
98 | #Kalman variables
99 | self.x_ = np.array([0, 0, 1, 1])
100 | self.P_ = np.array([[1,0,0,0],[0,1,0,0],[0,0,1000,0],[0,0,0,1000]])
101 | self.results = np.empty([1,2])
102 | self.locarray = np.empty([1,2])
103 | def update_locarray(self, locarray):
104 | self.locarray = locarray
105 | def calc(self):
106 | #Predict
107 | self.x_ = F_.dot(self.x_)
108 | self.P_ = F_.dot(self.P_).dot(F_.T) + Q_
109 | # Kalman Update
110 | self.z = np.array([self.locarray[0],self.locarray[1], self.locarray[2], self.locarray[3]])
111 | self.y = self.z-self.x_
112 | self.S = self.P_ + R_laser
113 | self.K = self.P_.dot(np.linalg.inv(self.S))
114 | self.x_ = self.x_ + self.K.dot(self.y)
115 | self.P_ = (np.eye(len(self.P_))-self.K).dot(self.P_)
116 | self.results = np.append(self.results, [self.x_[0:2]], axis = 0)
117 | #adjusts cx_future and cy_future as where the center of car will be after delta_t time
118 | def update_speed(self):
119 | #data = soc.recv(554)
120 | #data_string = data[206:270].decode('utf-8')
121 | #data_list = data_string.split(",")
122 | #df = pd.DataFrame(data_list)
123 | #self.speed = kn_to_ms(float(df.iloc[7][0]))
124 |
125 | #self.speed = float(random.randint(1,15))
126 | self.speed = 0
127 |
128 | #self.speed = math.sqrt((self.x_[2]*self.x_[2])+(self.x_[3]*self.x_[3]))
129 | def update_for_box(self):
130 | hyp = self.speed*delta_t
131 | changeiny = hyp*math.sin(math.radians(90.0-(-1.0*self.angle)))
132 | changeinx = hyp*math.cos(math.radians(90.0-(-1.0*self.angle)))
133 | self.cy_future = self.cy+changeiny
134 | self.cx_future = self.cx+changeinx
135 | return hyp
136 | #adjusts cx_future and cy_future as where the center of the car's path will be after delta_t time
137 | def update_for_path(self):
138 | hyp = self.speed*delta_t
139 | changeiny = (hyp/2)*math.sin(math.radians(90.0-(-1.0*self.angle)))
140 | changeinx = (hyp/2)*math.cos(math.radians(90.0-(-1.0*self.angle)))
141 | self.cy_future = self.cy+changeiny
142 | self.cx_future = self.cx+changeinx
143 | return hyp
144 | def get_contour_future(self):
145 | self.update_for_box()
146 | w = self.w
147 | l = self.l
148 | c = shapely.geometry.box(-w/2.0, -l/2.0, w/2.0, l/2.0)
149 | rc = shapely.affinity.rotate(c, self.angle)
150 | return shapely.affinity.translate(rc, self.cx_future, self.cy_future)
151 | def get_path(self):
152 | dist = self.update_for_path()
153 | w = self.w
154 | l = dist+self.l
155 | c = shapely.geometry.box(-w/2.0, -l/2.0, w/2.0, l/2.0)
156 | rc = shapely.affinity.rotate(c, self.angle)
157 | return shapely.affinity.translate(rc, self.cx_future, self.cy_future)
158 | def intersection_future(self, other):
159 | return self.get_path().intersection(other.get_path())
160 | def collision_future(self, other):
161 | overlap = self.intersection_future(other)
162 | if (overlap.area > 0):
163 | return True
164 | else:
165 | return False
166 | def update_object(self):
167 | array = object_tracking(self)
168 | self.cx = array[0]
169 | self.cy = array[1]
170 | vx = array[2]
171 | vy = array[3]
172 |
173 | if vx == 0 and vy == 0:
174 | angle = 0
175 | elif vy == 0:
176 | if (vx > 0):
177 | angle = -90.0
178 | elif (vx < 0):
179 | angle = 90.0
180 | elif vx == 0:
181 | if (vy > 0):
182 | angle = 0
183 | elif (vy < 0):
184 | angle = 180
185 | else:
186 | angle = math.degrees(math.atan(array[3]/array[2]))
187 | if vx>0 and vy>0: #quadrant 1 #pos/pos = pos
188 | angle = (90.0 - angle)*-1.0
189 | if vx<0 and vy>0: #quadrant 2 #pos/neg = neg
190 | angle = (90.0 + angle)
191 | if vx<0 and vy<0: #quadrant 3 #neg/neg = pos
192 | angle = angle+90
193 | if vx>0 and vy<0: #quadrant 4 #neg/pos = neg
194 | angle = (90 + angle) + 180
195 | self.angle = angle
196 | self.speed = abs(math.sqrt((vx*vx)+(vy*vy)))
197 |
198 |
199 |
200 | #random initial stuff
201 | car_dim = {
202 | "length": 12.0,
203 | "width": 2.0
204 | }
205 |
206 | #random initial stuff
207 | fake_obj = {
208 | "length": 12.0,
209 | "width": 2.0,
210 | "x": 22.0,
211 | "y": 15.0,
212 | "angle": 75,
213 | "speed": 6.0
214 | }
215 |
216 |
217 | #returns true if car and obj intersect
218 | def collision_detection(car, obj):
219 | return car.collision_future(obj)
220 |
221 | def alert_linux():
222 | duration = 1 # second
223 | freq = 440 # Hz
224 | os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq))
225 |
226 | def alert():
227 |     print("***CRASH DETECTED***")
228 |     alert_linux()
229 |     frequency = 600 # Hz, for the commented-out winsound fallback below
230 |     duration = 750 # ms, for the commented-out winsound fallback below
231 | # winsound.Beep(frequency, duration)
232 | # winsound.Beep(frequency, duration)
233 |
234 | ############## the "MAIN"####################
235 |
236 |
237 | """GET LOCARRAY INPUT"""
238 | # locarray1 = np.empty([1,3])
239 | # with open("obj_pose-laser-radar-synthetic-ukf-input1.txt","r") as f:
240 | # for line in f:
241 | # location = line.lower()
242 | # location = location.replace("\n","")
243 | # location = location.split("\t")
244 | # for n in range(len(location)):
245 | # location[n] = float(location[n])
246 | # locarray1 = np.append(locarray1, [location], axis = 0)
247 | # plt.plot(locarray1[1:,0],locarray1[1:,1])
248 | # plt.title("Input")
249 | # plt.show()
250 |
251 |
252 | # my_car = Car(0,0,car_dim["width"],car_dim["length"],0)
253 | # other_car = Car(0,0,fake_obj["width"],fake_obj["length"],0)
254 | # other_car.update_locarray(locarray)
255 |
256 |
257 | # fig = plt.figure(1, figsize=(15, 8))
258 | # ax = fig.add_subplot(121)
259 | # ax.set_xlim(-40, 40)
260 | # ax.set_ylim(-40, 40)
261 |
262 |
263 | # my_car.update_speed()
264 | # other_car.update_object()
265 |
266 | # ax.add_patch(PolygonPatch(my_car.get_contour(), fc='#990000', alpha=1))
267 | # ax.add_patch(PolygonPatch(my_car.get_path(), fc='#990000', alpha=.5))
268 | # ax.add_patch(PolygonPatch(other_car.get_contour(), fc='#000099', alpha=1))
269 | # ax.add_patch(PolygonPatch(other_car.get_path(), fc='#000099', alpha=.5))
270 |
271 |
272 | # if(collision_detection(my_car,other_car)):
273 | # alert()
274 |
275 |
276 | # plt.show()
277 |
278 |
279 |
280 |
--------------------------------------------------------------------------------
/docs/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/.DS_Store
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Collision Prediction System Prototyping
2 |
3 | >Collision Prediction with LIDAR and Camera
4 |
5 | EE 464H/R Senior Design, Spring 2018 - Team 11
6 |
7 | Team Members: Aditya Kharosekar, Chang Park, Evan Reid, Roberto Rioja, Steven Han, Trevor Eggenberger, and [Vivian Tan](https://vivianistan.github.io)
8 |
9 | Faculty Mentor: Dr. Joydeep Ghosh
10 |
11 | Graduate Student Mentors: Michael Motro and Taewan Kim
12 |
13 | Special Thanks: TxDOT (Texas Department of Transportation)
14 |
15 | Links:
16 | * [Project Site](https://ut18-senior-design.github.io/Object-Detection-and-Calibrations/#/)
17 | * [Code](https://github.com/UT18-Senior-Design/Object-Detection-and-Calibrations/)
18 |
19 | # Description:
20 |
21 | This project is a collision warning system prototype that can warn drivers of impending collisions. The Texas Department of Transportation (TxDOT) and the CARSTOP research group are interested in developing prototypes of collision warning (CW) systems to improve safety in traffic scenarios such as urban intersections, rural roads, and areas with heavy bicycle and pedestrian traffic. With the rise of autonomous technologies, which are currently secretive and expensive to replicate, we decided to create a collision warning prototype with open-source software and lower-cost versions of the sensors used in autonomous vehicles. Our system detects objects and tracks each one's position in real time using LIDAR and a camera, and it applies Kalman filtering to the fused position data to predict collisions with the host vehicle.
22 |
23 |
24 | # System Overview
25 |
26 | Here is the block diagram of the system:
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 | ## Object Detection:
37 |
38 | To detect objects present in the frame, we use an object detection algorithm called You Only Look Once ([YOLO](https://pjreddie.com/darknet/yolo/)), which detects objects and their bounding boxes in real time. It applies a single neural network to the entire image or frame, splitting it into regions for which it then predicts bounding boxes and class probabilities.
39 |
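As a rough sketch of how these detections are consumed downstream: each box arrives as `(left, right, top, bottom)` pixel bounds (the format returned by `get_bounding_boxes` in `main.py`), and the LIDAR association step only needs each box's center. The helper below is illustrative, not part of the repo:

    # Reduce each YOLO box to its pixel center (illustrative helper),
    # which is what the LIDAR association step in main.py works with.
    def box_centers(objects):
        """objects: iterable of (left, right, top, bottom) pixel bounds."""
        return [((left + right) / 2.0, (top + bottom) / 2.0)
                for left, right, top, bottom in objects]
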
40 | *YOLO Object Detection Overview:*
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | ## Lidar-Camera Transformation:
52 |
53 | Our LIDAR-Camera Transformation (LCT) system compares 3D LIDAR points to positions inside our camera footage. We use a rotation matrix and a translation vector, followed by the camera's intrinsic matrix, to project the 3-dimensional point cloud into the 2-dimensional domain of the camera.
54 |
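As a condensed sketch of the projection implemented in `lidar_projection.py` (the hand-measured `F`, `R`, and `T` there are specific to our rig, and the 0.25 scale is an empirical correction, so treat the defaults as placeholders):

    import numpy as np

    # Pixel coordinates ~= scale * F @ R @ (P + T), applied to every LIDAR point
    def project_points(points_xyz, F, R, T, scale=0.25):
        """points_xyz: Nx3 LIDAR points; F: 3x3 camera matrix;
        R: 3x3 rotation; T: 3x1 translation (meters)."""
        A = points_xyz.T                  # 3xN, one column per point
        return scale * (F @ R @ (A + T))  # T broadcasts across all columns
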
55 |
56 | *LIDAR-Camera transform equation:*
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 | *Illustration of 3D-2D Projection:*
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 | *Image of point cloud overlayed on truck using LCT:*
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 | We were able to detect a car's distance within 4% error and a person's distance within 2% error.
87 |
88 | The matrices we used were calculated by hand for very specific configurations of the camera and LIDAR positions.
89 |
90 | ## Collision Prediction
91 |
92 | We used Kalman filtering to predict the trajectory of detected objects.
93 |
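The filter uses a constant-velocity model over the state `[px, py, vx, vy]`. Below is a condensed version of the predict/update step from `Car.calc()` in `collisionNew.py`; in that code the full state is measured directly, so the measurement matrix is the identity:

    import numpy as np

    # Condensed Kalman step, mirroring Car.calc() in collisionNew.py
    def kalman_step(x, P, z, F, Q, R):
        # Predict: propagate state and covariance through the motion model
        x = F.dot(x)
        P = F.dot(P).dot(F.T) + Q
        # Update: correct the prediction with measurement z (H = I here)
        y = z - x                        # innovation
        S = P + R                        # innovation covariance
        K = P.dot(np.linalg.inv(S))      # Kalman gain
        x = x + K.dot(y)
        P = (np.eye(len(P)) - K).dot(P)
        return x, P
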
94 |
95 | # Testing and Results
96 |
97 | We mainly focused on testing:
98 |
99 | * LIDAR point cloud collection verification
100 | * Object position tracking accuracy
101 | * LIDAR-Camera calibration overall quality
102 |
103 | # Potential future work
104 |
105 | * Increase quality of LIDAR-Camera calibration, which could increase accuracy of position tracking
106 | * Add functionality for predicting trajectory of multiple objects
107 | * Integrate more sensors (radar, additional cameras, etc.)
108 | * Integrate/develop basic V2V communication protocol
109 |
110 | # Miscellaneous
111 | ## Sensors
112 | * LIDAR: [Velodyne VLP-16](http://velodynelidar.com/vlp-16.html)
113 | * Camera: [Logitech c920 1080P HD Webcam](https://www.logitech.com/en-us/product/hd-pro-webcam-c920)
114 | ## Getting Started:
115 | Most of the code we used is available on GitHub; however, our modified YOLO code is not uploaded yet.
116 |
117 | ### To run object detection + tracking:
118 |
119 | 1. Make sure your camera and LIDAR are in the correct positions (see calibration for more detail)
120 |
121 | 2. Run the YOLO code from `/darknet` using
122 |
123 | ./darknet detector demo cfg/coco.data cfg/yolo.cfg yolo.weights
124 |
125 | 3. Then run `python main.py` to run the program
126 |
127 |
128 |
129 | ### To check the LIDAR-Camera Matrix transformation:
130 | 1. Place your camera and LIDAR in the correct position
131 |
132 | 2. Run `python lidar_projection.py` to see the lidar projection on a video stream with your calibration matrix
133 |
134 |
135 | ### To capture a point cloud in .csv format:
136 |
137 | 1. Make sure your LIDAR is connected
138 | 2. Run `python velodyne_capture_v3.py` and your point cloud will be saved as `pcl.csv` by default (see the loading snippet below)
139 |
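The saved `pcl.csv` is a plain x, y, z, distance table (one point per row, in meters), so it can be loaded back with, for example:

    import numpy as np
    pcl = np.loadtxt('pcl.csv', delimiter=',')  # columns: x, y, z, distance
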
140 | ## Progression
141 | ### Apollo:
142 | Our first approach was to try [Apollo](https://github.com/ApolloAuto/apollo), an open-source autonomous driving platform by Baidu. Though the platform is meant for autonomous driving, we thought we could leverage some of its tools in a collision prediction/warning system. However, Apollo requires very expensive hardware, such as the 64-channel Velodyne LIDAR, which we could not get access to. We were able to install Apollo and run its Dreamview simulator demo, but we ran into problems trying to integrate our existing sensors into the system. We posted our issues to their GitHub, but after several weeks of unsuccessful integration attempts, we decided in the interest of time to try other platforms and approaches to building a collision warning system with our available sensors.
143 |
144 | ### ROS:
145 | The next platform we tried was ROS (Robot Operating System), a commonly used framework for robot software development. We chose ROS because Apollo is built on it, so we could try some of the same software drivers Apollo uses (such as the Velodyne and camera drivers) to implement a simpler collision warning system. We successfully installed ROS Kinetic and the Velodyne drivers on one of our machines and used the drivers to display VLP-16 point clouds in the visualizer, following [this](https://wiki.ros.org/velodyne/Tutorials/Getting%20Started%20with%20the%20Velodyne%20VLP16) tutorial. However, when we tried to calibrate our camera and LIDAR together, we couldn't get any of the existing ROS drivers to work due to various software/installation issues.
146 |
147 | ### YOLO + Python:
148 | Since we couldn't find an existing driver to calibrate our LIDAR and camera, we decided to primarily use [YOLO](https://pjreddie.com/darknet/yolo/) and write our own code in Python to create the system we needed. We use YOLO to detect objects and create bounding boxes for them, while we wrote our own code to calibrate the camera and LIDAR and predict the trajectory of the objects.
149 |
150 | ## Video Demos:
151 | * Links coming soon
152 |
153 |
158 |
159 |
--------------------------------------------------------------------------------
/docs/images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/.DS_Store
--------------------------------------------------------------------------------
/docs/images/2dto3d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/2dto3d.png
--------------------------------------------------------------------------------
/docs/images/DSC00340.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/DSC00340.JPG
--------------------------------------------------------------------------------
/docs/images/DSC00341.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/DSC00341.JPG
--------------------------------------------------------------------------------
/docs/images/DSC00342.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/DSC00342.JPG
--------------------------------------------------------------------------------
/docs/images/block.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/block.png
--------------------------------------------------------------------------------
/docs/images/lctoverlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/lctoverlay.png
--------------------------------------------------------------------------------
/docs/images/pcl1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/pcl1.png
--------------------------------------------------------------------------------
/docs/images/pcl2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/pcl2.png
--------------------------------------------------------------------------------
/docs/images/pointcloud8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/pointcloud8.png
--------------------------------------------------------------------------------
/docs/images/puck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/puck.png
--------------------------------------------------------------------------------
/docs/images/transform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/transform.png
--------------------------------------------------------------------------------
/docs/images/yolo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UT18-Senior-Design/Object-Detection-and-Calibrations/a028f69358688e94eda5616460c7ae8f71b4f3bc/docs/images/yolo.png
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Project Documentation
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/lidar_projection.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | import datetime
6 | import scipy
7 | import time
8 | import socket
9 | import datetime
10 | import cv2
11 |
12 | from velodyne_capture_v3 import init_velo_socket, get_pointcloud
13 | from mpl_toolkits.mplot3d import Axes3D
14 | from matplotlib.patches import Circle
15 |
16 | # Init sockets
17 | PORT = 2368
18 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
19 | soc.bind(('', PORT))
20 |
21 | # For matrix values
22 | xr = 95 * math.pi/180
23 | yr = 10 * math.pi/180
24 | zr = 0 * math.pi/180
25 |
26 |
27 | # start z by 90 y by -90
28 | # Matrices: (current projection seems to be off, needs to be fixed?)
29 | Xr = np.matrix([[1,0,0],[0,math.cos(xr),-1*math.sin(xr)],[0,math.sin(xr),math.cos(xr)]])
30 | Yr = np.matrix([[math.cos(yr),0,math.sin(yr)],[0,1,0],[-1*math.sin(yr),0,math.cos(yr)]])
31 | Zr = np.matrix([[math.cos(zr),-1*math.sin(zr),0],[math.sin(zr),math.cos(zr),0],[0,0,1]])
32 |
33 | F = np.matrix([[935,0,0],[0,935,0],[225,375,1]])
34 |
35 | R = np.matmul(Zr,Yr)
36 | R= np.matmul(R,Xr)
37 |
38 | T = np.matrix([[1.1],[0],[-1.32]])
39 |
40 | cap = cv2.VideoCapture(0)
41 | # fig, ax = plt.subplots(1)
42 | # plt.ion()
43 | # plt.show()
44 |
45 | if not cap.isOpened():
46 | raise IOError("cannot open webcam")
47 |
48 | while 1:
49 | # Get pointcloud
50 | pcl = get_pointcloud(soc)
51 | # Get frame
52 | flag, frame = cap.read()
53 | cv2.imshow('frame', frame)
54 |
55 | X= pcl[:,0]
56 | Y= pcl[:,1]
57 | Z= pcl[:,2]
58 | distance = pcl[:,3]
59 | # make A matrix (x y z)
60 | size= len(X)
61 | X1= np.matrix.transpose(X)
62 | Y1= np.matrix.transpose(Y)
63 | Z1= np.matrix.transpose(Z)
64 | # stack the X, Y, Z rows into a 3xN matrix
65 | A = np.matrix([X1, Y1, Z1])
66 |
67 | T1=np.matrix.transpose(T)
68 | T2= np.repeat(T1,size,axis=0)
69 |
70 | T2= np.matrix.transpose(T2)
71 |
72 | c2 = np.matmul((F), (R))
73 | c2 = .25*np.matmul((c2),(A+T2))
74 |
75 | # Plot points on frame
76 | for x in np.nditer(c2, flags = ['external_loop'], order = 'F'):
77 | cv2.circle(frame, (int(x[0]),int(x[1])), 1, (255,0,0), thickness=-1)
78 | #print(x)
79 | cv2.imshow('frame', frame)
80 |
81 | c = cv2.waitKey(1)
82 | if c == 27:
83 | break
84 |
85 | cap.release()
86 | cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | import datetime
6 | import scipy
7 | import time
8 | import socket
9 | import datetime
10 |
11 | from client import init_yolo_socket, get_bounding_boxes
12 | from velodyne_capture_v2 import init_velo_socket, get_pointcloud
13 | from mpl_toolkits.mplot3d import Axes3D
14 | from matplotlib.patches import Circle
15 | from collisionNew import Car, collision_detection
16 |
17 | '''
18 | code to get lidar point cloud, get bounding boxes for that frame,
19 | and predict a collision using Kalman filter (in progress)
20 |
21 | '''
22 |
23 | # Initialization info
24 | car_dim = {
25 | "length": 4.5,
26 | "width": 2.1
27 | }
28 |
29 | fake_obj = {
30 | "length": 4.5,
31 | "width": 2.1,
32 | "x": 22.0,
33 | "y": 15.0,
34 | "angle": 75,
35 | "speed": 6.0
36 | }
37 |
38 | # Set up sockets:
39 | HOST = "192.168.1.201"
40 | PORT = 2368
41 | TCP_IP = '127.0.0.1'
42 | TCP_PORT = 8080
43 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
44 | s.connect((TCP_IP, TCP_PORT))
45 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
46 | soc.bind(('', PORT))
47 |
48 | # For matrix values
49 | xr = 95 * math.pi/180
50 | yr = 10 * math.pi/180
51 | zr = 0 * math.pi/180
52 |
53 | # Get R matrix
54 | Xr = np.matrix([[1,0,0],[0,math.cos(xr),-1*math.sin(xr)],[0,math.sin(xr),math.cos(xr)]])
55 | Yr = np.matrix([[math.cos(yr),0,math.sin(yr)],[0,1,0],[-1*math.sin(yr),0,math.cos(yr)]])
56 | Zr = np.matrix([[math.cos(zr),-1*math.sin(zr),0],[math.sin(zr),math.cos(zr),0],[0,0,1]])
57 |
58 | F = np.matrix([[935,0,0],[0,935,0],[225,375,1]])
59 |
60 | R = np.matmul(Zr,Yr)
61 | R= np.matmul(R,Xr)
62 |
63 | # Translation vector (transposed, then tiled across all points below)
64 | T = np.matrix([[0.9],[0],[-1.32]])
65 | T1=np.matrix.transpose(T)
66 |
67 | # Initialize previous x,y,t
68 | prev_x = 0
69 | prev_y = 0
70 | prev_t = 0
71 |
72 | # Initialize car objects:
73 | # Car location and dimensions (CHANGE)
74 | my_car = Car(0,0,car_dim["width"],car_dim["length"],0)
75 | # Detected object (CHANGE)
76 | other_car = Car(0,0,fake_obj["width"],fake_obj["length"],0)
77 |
78 | # Get x, y, z, and distance data from point cloud
79 | while 1:
80 | start = datetime.datetime.now()
81 | pcl = get_pointcloud(soc)
82 | elapsed = (datetime.datetime.now() - start).microseconds
83 | # print('pcl time: ', elapsed)
84 | X= pcl[:,0]
85 | Y= pcl[:,1]
86 | Z= pcl[:,2]
87 | distance = pcl[:,3]
88 |
89 | # GET BOUNDING BOX DATA HERE:
90 | #while 1:
91 | start = datetime.datetime.now()
92 | timestamp, numObjects, objects = get_bounding_boxes(s)
93 | print(timestamp, numObjects, objects)
94 | start = datetime.datetime.now()
95 |
96 | # make A matrix (x y z)
97 | size= len(X)
98 | X1= np.matrix.transpose(X)
99 | Y1= np.matrix.transpose(Y)
100 | Z1= np.matrix.transpose(Z)
101 | A= np.matrix([X1, Y1 ,Z1])
102 | print(A.shape)
103 | T2= np.repeat(T1,size,axis=0)
104 | T2= np.matrix.transpose(T2)
105 |
106 | # Multiply matrices (lidar points in pixel coordinates)
107 | c2 = np.matmul((F), (R))
108 | c2 = .25*np.matmul((c2),(A+T2))
109 |
110 |
111 |
112 | #print("matrix calculations: ", (datetime.datetime.now()-start).microseconds)
113 | #print('objects', numObjects)
114 | #print(datetime.datetime.now())
115 |
116 | for i in range(numObjects):
117 | left = objects[i][0]
118 | right = objects[i][1]
119 | top = objects[i][2]
120 | bottom = objects[i][3]
121 |
122 | # Center of box
123 | xcenter = (left+right)/2.0
124 | ycenter = (top+bottom)/2.0
125 |
126 | # c3 = c2
127 | # Bounding box
128 | start = datetime.datetime.now()
129 |
130 | B = np.square((c2[0,:]-xcenter))+ np.square((c2[1,:]-ycenter))
131 |
132 | # Get lidar points in bounding box
133 | #points = []
134 |
135 | #points = [[X[i], Y[i], distance[i]] for i in range(c2_T.shape[0]) if (c2_T[i,0] > left and c2_T[i,0] < right and c2_T[i,1] > top and c2_T[i,1] < bottom)]
136 | # for i in range(c2_T.shape[0]):
137 | # if c2_T[i,0] > left and c2_T[i,0] < right and c2_T[i,1] > top and c2_T[i,1] < bottom:
138 | # points.append([X[i], Y[i], distance[i]])
139 | # elapsed = (datetime.datetime.now() - start).microseconds
140 | # print(elapsed/1000)
141 | # print(len(points))
142 |
143 | # Get index of lidar point for detected object
144 | index0 = int(np.argmin(B, axis=1))
145 |
146 |
147 | #print('y', Y[index0])
148 | #print("Index of center point is:", index0)
149 |
150 | # printing x,y, and distance for detected objects
151 | print('x:{:.2f} y:{:.2f} distance: {:.2f}'.format(X[index0], Y[index0], distance[index0]));
152 |
153 | # Get inputs ready for prediction: [x,y,vx,vy,dt]
154 | x = X[index0]
155 | y = Y[index0]
156 | t = time.time()
157 |
158 |         # Account for first instance:
159 |         if (prev_t == 0):
160 |             dt = 1
161 |             vx = 0
162 |             vy = 0
163 |         # Else, update velocities from the previous position fix:
164 |         else:
165 |             dt = t - prev_t
166 |             vx = (x - prev_x)/dt
167 |             vy = (y - prev_y)/dt
168 |         prev_x = x
169 |         prev_y = y
170 |         prev_t = t
171 |
172 | # Code from collisionNew.py:
173 | other_car.update_locarray([x, y, vx, vy, dt])
174 | #print('distance: {:.2f}'.format(distance[index0]))
175 |
176 | my_car.update_speed()
177 | other_car.update_object()
178 |
179 | if(collision_detection(my_car,other_car)):
180 | print('ALERT!!!')
181 | # alert()
182 |
183 | print(' ')
184 |
--------------------------------------------------------------------------------
/orig_main.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | from matplotlib.patches import Circle
6 | import scipy
7 | from mpl_toolkits.mplot3d import Axes3D
8 | import time
9 | import socket
10 | import datetime
11 | from client import init_yolo_socket, get_bounding_boxes
12 | from velodyne_capture_v2 import init_velo_socket, get_pointcloud
13 | import datetime
14 |
15 |
16 | '''
17 | code to get xy plane from 3d point cloud + image
18 | inputs:
19 | point cloud: csv/data data frame
20 | image: .jpg
21 |
22 | '''
23 |
24 | # set up sockets
25 | HOST = "192.168.1.201"
26 | PORT = 2368
27 | TCP_IP = '127.0.0.1'
28 | TCP_PORT = 8080
29 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
30 | s.connect((TCP_IP, TCP_PORT))
31 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
32 | soc.bind(('', PORT))
33 |
34 |
35 |
36 | xr = 1.58
37 | yr = -1.42 #-1*math.pi/2.0
38 | zr = 1.58
39 |
40 | # start z by 90 y by -90
41 |
42 | # Get R matrix
43 | Xr = np.matrix([[1,0,0],[0,math.cos(xr),-1*math.sin(xr)],[0,math.sin(xr),math.cos(xr)]])
44 | Yr = np.matrix([[math.cos(yr),0,math.sin(yr)],[0,1,0],[-1*math.sin(yr),0,math.cos(yr)]])
45 | Zr = np.matrix([[math.cos(zr),-1*math.sin(zr),0],[math.sin(zr),math.cos(zr),0],[0,0,1]])
46 | R = np.matmul(np.matmul(Zr, Yr), Xr)
47 |
48 | # translation vector (transposed, then tiled across all points below)
49 | T = np.matrix([[-1],[3],[.7]])
50 | T1=np.matrix.transpose(T)
51 |
52 |
53 |
54 | # get x, y, z, and distance data from dataframe
55 | while 1:
56 | start = datetime.datetime.now()
57 | pcl = get_pointcloud(soc)
58 | elapsed = (datetime.datetime.now() - start).microseconds
59 | # print('pcl time: ', elapsed)
60 | X= pcl[:,0]
61 | Y= pcl[:,1]
62 | Z= pcl[:,2]
63 | distance = pcl[:,3]
64 |
65 | # GET BOUNDING BOX DATA HERE: (hard coded for now)
66 | #while 1:
67 | start = datetime.datetime.now()
68 | timestamp, numObjects, objects = get_bounding_boxes(s)
69 |
70 | start = datetime.datetime.now()
71 |
72 | # make A matrix (x y z)
73 | size= len(X)
74 | X1= np.matrix.transpose(X)
75 | Y1= np.matrix.transpose(Y)
76 | Z1= np.matrix.transpose(Z)
77 | A= np.matrix([X1, Y1 ,Z1])
78 | T2= np.repeat(T1,size,axis=0)
79 | T2= np.matrix.transpose(T2)
80 | # multiply matrices
81 | c2 = 100*np.matmul((R),(A-T2))
82 | #print("matrix calculations: ", (datetime.datetime.now()-start).microseconds)
83 | #print('objects', numObjects)
84 | # get center of bounding box
85 | #print(datetime.datetime.now())
86 | for i in range(numObjects):
87 | xcenter= (objects[i][0]+objects[i][1])/2.0
88 | ycenter= (objects[i][2]+objects[i][3])/2.0
89 | c3= c2
90 | B= np.square((c3[0,:]-xcenter))+ np.square((c3[1,:]-ycenter))
91 | index0= int(np.argmin(B, axis=1))
92 | #print('y', Y[index0])
93 | #print("Index of center point is:", index0)
94 | print('x:{:.2f} y:{:.2f} distance: {:.2f}'.format(X[index0], Y[index0], distance[index0]));
95 | #make sure we uncomment out the previous line with distance
96 | #print(distance[int(index0)]) since the index is not just an int?
97 | print(' ')
--------------------------------------------------------------------------------
/server.c:
--------------------------------------------------------------------------------
1 | // Server side C/C++ program to demonstrate Socket programming
2 | #include <unistd.h>
3 | #include <stdio.h>
4 | #include <stdlib.h>
5 | #include <string.h>
6 | #include <netinet/in.h> // also makes the <sys/socket.h> declarations visible on glibc
7 | #define PORT 8080
8 | int main(int argc, char const *argv[])
9 | {
10 | int server_fd, new_socket, valread;
11 | struct sockaddr_in address;
12 | int opt = 1;
13 | int addrlen = sizeof(address);
14 | char buffer[1024] = {0};
15 | char *hello = "Hello from server";
16 |
17 | // Creating socket file descriptor
18 | if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0)
19 | {
20 | perror("socket failed");
21 | exit(EXIT_FAILURE);
22 | }
23 |
24 | // Forcefully attaching socket to the port 8080
25 | if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
26 | &opt, sizeof(opt)))
27 | {
28 | perror("setsockopt");
29 | exit(EXIT_FAILURE);
30 | }
31 | address.sin_family = AF_INET;
32 | address.sin_addr.s_addr = INADDR_ANY;
33 | address.sin_port = htons( PORT );
34 |
35 | // Forcefully attaching socket to the port 8080
36 | if (bind(server_fd, (struct sockaddr *)&address,
37 | sizeof(address))<0)
38 | {
39 | perror("bind failed");
40 | exit(EXIT_FAILURE);
41 | }
42 | if (listen(server_fd, 3) < 0)
43 | {
44 | perror("listen");
45 | exit(EXIT_FAILURE);
46 | }
47 | if ((new_socket = accept(server_fd, (struct sockaddr *)&address,
48 | (socklen_t*)&addrlen))<0)
49 | {
50 | perror("accept");
51 | exit(EXIT_FAILURE);
52 | }
53 | valread = read( new_socket , buffer, 1024);
54 | printf("%s\n",buffer );
55 | send(new_socket , hello , strlen(hello) , 0 );
56 | printf("Hello message sent\n");
57 | return 0;
58 | }
--------------------------------------------------------------------------------
/transpose.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | from matplotlib.patches import Circle
6 | import scipy
7 | from mpl_toolkits.mplot3d import Axes3D
8 | import time
9 | from camera_capture import get_image
10 | from velodyne_capture_v3 import init_velo_socket, get_pointcloud
11 | import socket
12 |
13 |
14 | def minmax_scale(x, i_min, i_max, o_min, o_max):
15 | return (x-i_min)/float(i_max-i_min)*(o_max-o_min)+o_min
16 |
17 |
18 | def get_calibration(pcl):
19 | X= pcl[:,0]
20 | Y= pcl[:,1]
21 | Z= pcl[:,2]
22 | distance = pcl[:,3]
23 |
24 | # For matrix values
25 | xr = 95 * math.pi/180
26 | yr = 10 * math.pi/180
27 | zr = 0 * math.pi/180
28 |
29 | # start z by 90 y by -90
30 |
31 | Xr = np.matrix([[1,0,0],[0,math.cos(xr),-1*math.sin(xr)],[0,math.sin(xr),math.cos(xr)]])
32 | Yr = np.matrix([[math.cos(yr),0,math.sin(yr)],[0,1,0],[-1*math.sin(yr),0,math.cos(yr)]])
33 | Zr = np.matrix([[math.cos(zr),-1*math.sin(zr),0],[math.sin(zr),math.cos(zr),0],[0,0,1]])
34 |
35 | F = np.matrix([[935,0,0],[0,935,0],[225,375,1]])
36 |
37 | #rotation matrix
38 | R = np.matmul(Zr,Yr)
39 | R= np.matmul(R,Xr)
40 |
41 | # translation vector
42 | T = np.matrix([[1.1],[0],[-1.32]])
43 |
44 | size= len(X)
45 | X1= np.matrix.transpose(X)
46 | Y1= np.matrix.transpose(Y)
47 | Z1= np.matrix.transpose(Z)
48 | A=[X1,Y1,Z1]
49 | A= np.matrix([X1,Y1 ,Z1])
50 |
51 | T1=np.matrix.transpose(T)
52 | T2= np.repeat(T1,size,axis=0)
53 |
54 | T2= np.matrix.transpose(T2)
55 |
56 | c2 = np.matmul((F), (R))
57 | c2 = .25*np.matmul((c2),(A+T2))
58 |
59 | return c2
60 |
61 | PORT = 2368
62 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
63 | soc.bind(('', PORT))
64 |
65 | pcl = get_pointcloud(soc)
66 | distance = pcl[:,3]  # range (m) for each point
67 | c2 = get_calibration(pcl)
68 |
69 | xcenter = 307
70 | ycenter = 207
71 | B = np.square((c2[0,:]-xcenter)) + np.square((c2[1,:]-ycenter))
72 | index = int(np.argmin(B, axis=1))
73 |
74 | fig, ax = plt.subplots()
75 | cmap = plt.get_cmap('brg')
76 | pointsDist = np.asarray(distance)
77 | pointsDist = np.round(minmax_scale(1.0/pointsDist,1.0/75,1.0/1.5,1,255)).astype('uint8')
78 | pointsColor = np.array([cmap(1.0-pdist/255.0)[:3] for pdist in pointsDist])
79 | ax.scatter(np.asarray(c2[0,:]), np.asarray(c2[1,:]), s=0.5, c=pointsColor)
80 | circ = Circle((c2[0,index],c2[1,index]), 5, color='red')
81 | ax.add_patch(circ)
82 | print(distance[index])
83 | plt.show()
--------------------------------------------------------------------------------
/utils_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | from skimage.io import imsave  # needed by imlidarwrite() below
8 | #import tensorflow as tf  # required only for the tf_* helpers
9 | # Note: float32 is used to match other tensorflow functions
10 |
11 | # 'P_cam': Intrinsic matrix (3x4)
12 | # 'R_lidar2cam': Extrinsic matrix (4x4)
13 | class CalibFields(object):
14 | intrinsic = 'P_cam'
15 | extrinsic = 'R_lidar2cam'
16 |
17 | _D_MAX = 75.0
18 | _D_MIN = 2.0
19 | _CMAP = plt.get_cmap('brg')
20 | _MODE = 'inverse'
21 |
22 | init_v_t = np.array([-0.5,0.02,-0.98],dtype=np.float32)
23 | init_R_rot = np.array([[0., 0., -1.],
24 | [0., -1., 0.],
25 | [1., 0., 0.]],dtype=np.float32)
26 | # init_R_int = np.array([[898.7, 0., 359., 0.],
27 | # [0., 901.4, 652., 0.],
28 | # [0., 0., 1., 0.]],dtype=np.float32)
29 |
30 | init_R_int = np.array([[935.0, 0., 359., 0.],
31 | [0., 935., 640., 0.],
32 | [0., 0., 1., 0.]],dtype=np.float32)
33 |
34 | """
35 |
36 | x: forward
37 | y: left
38 | z: up
39 |
40 |
41 | x: down
42 | y: right
43 | """
44 |
45 | def loadCalib(f_int,f_ext,R_int=init_R_int,v_t=init_v_t,R_rot=init_R_rot,
46 | ltype='m8'):
47 | dict_calib = {}
48 | # Intrinsic matrix (3x4)
49 | if f_int:
50 | dict_calib[CalibFields.intrinsic] = np.loadtxt(f_int, delimiter=' ')\
51 | .astype(np.float32)
52 | else:
53 | dict_calib[CalibFields.intrinsic] = R_int
54 | # Extrinsic Matrix (4x4)
55 | if f_ext:
56 | dict_calib[CalibFields.extrinsic] = np.loadtxt(f_ext, delimiter=' ')\
57 | .astype(np.float32)
58 | else:
59 | dict_calib[CalibFields.extrinsic] = np.eye(4,dtype=np.float32)
60 | dict_calib[CalibFields.extrinsic][:3,3] = v_t
61 | if ltype == 'm8':
62 | dict_calib[CalibFields.extrinsic][:3,:3] = R_rot
63 | elif ltype == 'velo':
64 | rot90 = np.zeros((3,3))
65 | rot90[0,1] = -1.0
66 | rot90[1,0] = 1.0
67 | rot90[2,2] = 1.0
68 | dict_calib[CalibFields.extrinsic][:3,:3] = np.dot(R_rot,rot90)
69 | else:
70 | dict_calib[CalibFields.extrinsic][:3,:3] = R_rot
71 | return dict_calib
72 |
73 | def minmax_scale(x,i_min,i_max,o_min,o_max):
74 | # MinMax scaling of x
75 | # i_min<= x <= i_max to o_min<= x_new <= o_max
76 | return (x-i_min)/float(i_max-i_min)*(o_max-o_min)+o_min
77 |
78 | def coord_transform(points, t_mat):
79 | # Change to homogeneous form
80 | points = np.hstack([points,np.ones((np.shape(points)[0],1))])
81 | t_points = np.dot(points,t_mat.T)
82 | print(t_points.shape)
83 | # Normalize
84 | t_points = t_points[:,:-1]/t_points[:,[-1]]
85 | return t_points
86 |
87 | def project_lidar_to_img(dict_calib,points,im_height,im_width):
88 | # Extract depth data first before projection to 2d image space
89 | trans_mat = dict_calib[CalibFields.extrinsic]
90 | points3D = coord_transform(points,trans_mat)
91 | print('points3d shape:', points3D.shape)
92 | pointsDist = points3D[:,2]
93 |
94 | # Project to image space
95 | trans_mat = np.dot(dict_calib[CalibFields.intrinsic],trans_mat)
96 | points2D = coord_transform(points,trans_mat)
97 | print('points2d shape:', points2D.shape)
98 |
99 |
100 | # Find only feasible points
101 | idx1 = (points2D[:,0]>=0) & (points2D[:,0] <=im_height-1)
102 | idx2 = (points2D[:,1]>=0) & (points2D[:,1] <=im_width-1)
103 | idx3 = (pointsDist>=0)
104 | idx_in = idx1 & idx2 & idx3
105 |
106 | return points2D[idx_in,:], pointsDist[idx_in]
107 |
108 | def dist_to_pixel(val_dist, mode,
109 | d_max=_D_MAX, d_min=_D_MIN):
110 | """ Returns pixel value from distance measurment
111 | Args:
112 | val_dist: distance value (m)
113 | mode: 'inverse' vs 'standard'
114 | d_max: maximum distance to consider
115 | d_min: minimum distance to consider
116 | Returns:
117 | pixel value in 'uint8' format
118 | """
119 | val_dist = d_max if val_dist>d_max else val_dist if val_dist>d_min else d_min
120 | if mode == 'standard':
121 | return np.round(minmax_scale(val_dist,
122 | d_min,d_max,
123 | 1,255)).astype('uint8')
124 | elif mode == 'inverse':
125 | return np.round(minmax_scale(1.0/val_dist,
126 | 1.0/d_max,1.0/d_min,
127 | 1,255)).astype('uint8')
128 | else:
129 | # Default is inverse
130 | return np.round(minmax_scale(1.0/val_dist,
131 | 1.0/d_max,1.0/d_min,
132 | 1,255)).astype('uint8')
133 |
134 | def points_to_img(points2D,pointsDist,im_height,im_width,mode=_MODE):
135 | points2D = np.round(points2D).astype('int')
136 | im_depth = np.zeros((im_height,im_width),dtype=np.uint8)
137 | for i,point in enumerate(points2D):
138 | im_depth[point[0],point[1]] = dist_to_pixel(pointsDist[i],mode=mode)
139 |
140 | return im_depth.reshape(im_height,im_width,1)
141 |
142 | # --------------------------------------------------------------
143 | # Functions for Tensorflow
144 | # --------------------------------------------------------------
145 |
146 | def tf_coord_transform(points, t_mat):
147 | # Change to homogeneous form
148 | points = tf.concat([points,tf.ones([tf.shape(points)[0],1],tf.float32)], 1)
149 | t_points = tf.matmul(points,tf.transpose(t_mat))
150 | # Normalize
151 | t_points = tf.div(t_points[:,:-1],tf.expand_dims(t_points[:,-1],1))
152 | return t_points
153 |
154 | def tf_project_lidar_to_img(dict_calib,points,im_height,im_width):
155 | # Extract depth data first before projection to 2d image space
156 | trans_mat = dict_calib[CalibFields.extrinsic]
157 | points3D = tf_coord_transform(points,trans_mat)
158 | pointsDist = points3D[:,2]
159 |
160 | # Project to image space
161 | trans_mat = tf.matmul(dict_calib[CalibFields.intrinsic],trans_mat)
162 | points2D = tf_coord_transform(points,trans_mat)
163 |
164 | # Find only feasible points
165 | idx1 = (points2D[:,0]>=0) & (points2D[:,0] <=tf.to_float(im_height)-1)
166 | idx2 = (points2D[:,1]>=0) & (points2D[:,1] <=tf.to_float(im_width)-1)
167 | idx3 = (pointsDist>=0)
168 | idx_in = idx1 & idx2 & idx3
169 |
170 | return tf.boolean_mask(points2D,idx_in), tf.boolean_mask(pointsDist,idx_in)
171 |
172 | def tf_dist_to_pixel(val_dist, mode,
173 | d_max=_D_MAX, d_min=_D_MIN):
174 | """ Returns pixel value from distance measurment
175 | Args:
176 | val_dist: distance value (m)
177 | mode: 'inverse' vs 'standard'
178 | d_max: maximum distance to consider
179 | d_min: minimum distance to consider
180 | Returns:
181 | pixel value in 'uint8' format
182 | """
183 | val_dist = tf.maximum(val_dist,d_min)
184 | val_dist = tf.minimum(val_dist,d_max)
185 | if mode == 'standard':
186 | return tf.cast(tf.round(minmax_scale(val_dist,
187 | d_min,d_max,
188 | 1,255)),tf.uint8)
189 | elif mode == 'inverse':
190 | return tf.cast(tf.round(minmax_scale(1.0/val_dist,
191 | 1.0/d_max,1.0/d_min,
192 | 1,255)),tf.uint8)
193 | else:
194 | # Default is inverse
195 | return tf.cast(tf.round(minmax_scale(1.0/val_dist,
196 | 1.0/d_max,1.0/d_min,
197 | 1,255)),tf.uint8)
198 |
199 | def tf_points_to_img(points2D,pointsDist,im_height,im_width,mode=_MODE):
200 | pointsPixel = tf_dist_to_pixel(pointsDist,mode=mode)
201 | points2D_yx = tf.cast(tf.round(points2D),tf.int32)
202 | img = tf.scatter_nd(points2D_yx,pointsPixel,[im_height,im_width])
203 |
204 | return tf.expand_dims(img, 2)
205 |
206 | def imlidarwrite(fname,im,im_depth):
207 | """Write image with RGB and depth
208 | Args:
209 | fname: file name
210 | im: RGB image array (h x w x 3)
211 | im_depth: depth image array (h x w)
212 | """
213 | im_out = im.copy()
214 | im_depth = np.squeeze(im_depth,axis=2)
215 | idx_h, idx_w = np.nonzero(im_depth)
216 | for hi,wi in zip(idx_h,idx_w):
217 | im_out[hi,wi,:] = (255*np.array(
218 | _CMAP(im_depth[hi,wi]/255.0)[:3]))\
219 | .astype(np.uint8)
220 | imsave(fname,im_out)
221 | print(" ... Write:{}".format(fname))
222 |
--------------------------------------------------------------------------------
/velodyne_capture_v2.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import math
3 | import time
4 | import datetime
5 | from datetime import datetime
6 | import numpy as np
7 | import pandas as pd
8 | import os
9 | import struct
10 |
11 | HOST = "192.168.1.201"
12 | PORT = 2368
13 |
14 | vertical_angle = [-15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15]
15 | count = 0
16 | count1 = 0
17 | initial_angle = 0
18 |
19 | def init_velo_socket():
20 |     soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
21 |     soc.bind(('', PORT))
22 |     return soc
23 | #data_buff = pd.DataFrame(columns=['x', 'y', 'z', 'distance'])
24 | def get_pointcloud(soc):
25 | prev_time = datetime.now()
26 | data_buff = []
27 | count = 0
28 |
29 | while True:
30 | # get data from port
31 | data = soc.recv(1248)
32 | #print('pcl size: ', len(data))
33 | # get all data except the last 2 bytes
34 | raw_data = data[:-2]
35 | count += 1
36 | #print('Avg for one packet in microseconds: ', (datetime.now()-prev_time).microseconds/count)
37 | if(count==90):
38 | #print('Time for one pcl in microseconds: ', (datetime.now()-prev_time).microseconds)
39 | prev_time = datetime.now()
40 |
41 | #data_buff = pd.DataFrame(columns=['x', 'y', 'z', 'distance'])
42 | #data_buff = []
43 | #reset counter
44 | count = 0
45 | break
46 |
47 |
48 | # for each of the 12 data blocks:
49 | for k in range(12):
50 |
51 | # kth block:
52 | offset = k * 100
53 | # print('Block #: {}'.format(k))
54 |
55 | '''
56 | Get azimuth data (see data packet sheet)
57 | Get values, reverse bytes, combine, convert to decimal, divide by 100
58 | '''
59 | azimuth = raw_data[2+offset] | (raw_data[3+offset] << 8)
60 | azimuth = azimuth / 100
61 | alpha = azimuth * np.pi / 180.0
62 | # print(azimuth)
63 |
64 | for i in range(2):
65 | for j in range(16):
66 | distance = raw_data[4+j*3+i*48+offset] | (raw_data[5+j*3+i*48+offset] << 8)
67 | distance = distance / 500
68 | reflectivity = raw_data[6+j*3+i*48+offset]
69 |
70 | # how to get alpha angle?
71 | omega = vertical_angle[j] * np.pi / 180.0
72 | x = distance*math.cos(omega)*math.sin(alpha)
73 | y = distance*math.cos(omega)*math.cos(alpha)
74 | z = distance*math.sin(omega)
75 |
76 | # limit fov
77 | #if(45 <= azimuth <= 135):
78 | # print('angle1: %f\tangle2: %f\t x: %f\ty: %f\tz: %f\tdistance: %f\t' % (azimuth, azimuth2, x, y, z, distance))
79 | # data_buff.loc[count1] = [x, y, z, distance]
80 | data_buff.append([x,y,z,distance])
81 |
82 |
83 | return np.array(data_buff)
84 |
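# Byte layout assumed by the offsets in get_pointcloud() (per the VLP-16
# manual): each 1206-byte packet carries 12 data blocks of 100 bytes, and
# within a block:
#   bytes 0-1:  flag 0xFFEE
#   bytes 2-3:  azimuth in hundredths of a degree (little-endian)
#   bytes 4-99: 2 firing sequences x 16 channels x 3 bytes
#               (uint16 distance in 2 mm units, uint8 reflectivity)
# so channel j of firing sequence i starts at byte 4 + i*48 + j*3 of block k,
# i.e. index 4 + j*3 + i*48 + k*100 into the packet, as used above.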
85 |
86 | # Code to test point clouds:
87 | print('point cloud test')
88 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
89 | soc.bind(('', PORT))
90 | np.savetxt('pcl.csv', get_pointcloud(soc), delimiter=',')
91 | soc.close()
92 |
93 |
94 |
--------------------------------------------------------------------------------
/velodyne_capture_v3.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import math
3 | import time
4 | from datetime import datetime
6 | import numpy as np
7 | import pandas as pd
8 | import os
9 | import struct
10 |
11 | HOST = "192.168.1.201"
12 | PORT = 2368
13 |
14 | LASER_ANGLES = [-15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15]
15 | NUM_LASERS = 16
16 |
17 | EXPECTED_PACKET_TIME = 0.001327 # valid only in "the strongest return mode"
18 | EXPECTED_SCAN_DURATION = 0.1
19 | DISTANCE_RESOLUTION = 0.002
20 | ROTATION_RESOLUTION = 0.01
21 | ROTATION_MAX_UNITS = 36000
22 |
23 |
24 | def init_velo_socket():
25 |     soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
26 |     soc.bind(('', PORT))
27 |     return soc
28 | #data_buff = pd.DataFrame(columns=['x', 'y', 'z', 'distance'])
29 |
30 | def calc(dis, azimuth, laser_id, timestamp):
31 | R = dis * DISTANCE_RESOLUTION
32 | omega = LASER_ANGLES[laser_id] * np.pi / 180.0
33 | alpha = azimuth / 100.0 * np.pi / 180.0
34 | X = R * np.cos(omega) * np.sin(alpha)
35 | Y = R * np.cos(omega) * np.cos(alpha)
36 | Z = R * np.sin(omega)
37 | return [X, Y, Z, R]
38 |
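# Worked example for calc(), using the constants above: a raw distance of
# 5000 at azimuth 9000 (i.e. 90.00 deg) on laser_id 1 (+1 deg elevation):
#   R = 5000 * DISTANCE_RESOLUTION = 10.0 m
#   X = 10.0 * cos(1 deg) * sin(90 deg) ~ 9.998
#   Y = 10.0 * cos(1 deg) * cos(90 deg) ~ 0.0
#   Z = 10.0 * sin(1 deg) ~ 0.175
# so calc(5000, 9000, 1, 0) -> [9.998..., ~0.0, 0.174..., 10.0]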
39 | def get_pointcloud(soc):
40 | prev_time = datetime.now()
41 | data_buff = []
42 | count = 0
43 | timestamp = 0
44 | time_offset = 0
45 |
46 | while True:
47 | # get data from port
48 | data = soc.recv(1248)
49 | count += 1
50 | if count == 90:
51 | count = 0
52 | break
53 | #print('pcl size: ', len(data))
54 | # get all data except the last 2 bytes
55 | raw_data = data[:-2]
56 |         for offset in xrange(0, 1200, 100):
57 |             flag, azimuth = struct.unpack_from("<HH", raw_data, offset)
58 |             assert flag == 0xEEFF, hex(flag)
59 |             # two firing sequences per 100-byte block; both reuse the
60 |             # block azimuth (in hundredths of a degree)
61 |             for step in range(2):
62 |                 seq_azimuth = (azimuth + step) % ROTATION_MAX_UNITS
63 |                 # per channel: H = distance (2 mm units), B = reflectivity
64 |                 arr = struct.unpack_from('<' + 'HB' * NUM_LASERS,
65 |                                          raw_data, offset + 4 + step * 48)
66 |                 for i in range(NUM_LASERS):
67 |                     x, y, z, dist = calc(arr[i * 2], seq_azimuth, i,
68 |                                          timestamp + time_offset)
69 |                     if dist > 0:
70 |                         data_buff.append([x, y, z, dist])
73 |
74 |
75 | return np.array(data_buff)
76 |
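# Packets-per-scan arithmetic from the constants above: one revolution takes
# EXPECTED_SCAN_DURATION / EXPECTED_PACKET_TIME = 0.1 / 0.001327 ~ 75 packets
# in strongest-return mode, so the count == 90 cutoff in get_pointcloud()
# collects slightly more than one full 360-degree sweep before returning.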
77 |
78 | # Code to test point clouds:
79 | print('point cloud test')
80 | soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
81 | soc.bind(('', PORT))
82 | np.savetxt('pcl.csv', get_pointcloud(soc), delimiter=',')
83 | soc.close()
84 |
85 |
86 |
--------------------------------------------------------------------------------
/vlp16.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | """
4 | read or unpack Velodyne VLP-16 data
5 | usage:
6 | ./velodyne.py [bin file dir]
7 | """
8 |
9 | import os
10 | import csv
11 | import sys
12 | import socket
13 | import glob
14 | from datetime import datetime, timedelta
15 | import struct
16 | import time
17 | import traceback
18 | import numpy as np
19 | from multiprocessing import Process, Queue, Pool
20 |
21 | import logging
22 | import logging.config
23 |
24 | HOST = "192.168.1.201"
25 | PORT = 2368
26 |
27 | LASER_ANGLES = [-15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15]
28 | NUM_LASERS = 16
29 |
30 | EXPECTED_PACKET_TIME = 0.001327 # valid only in "the strongest return mode"
31 | EXPECTED_SCAN_DURATION = 0.1
32 | DISTANCE_RESOLUTION = 0.002
33 | ROTATION_RESOLUTION = 0.01
34 | ROTATION_MAX_UNITS = 36000
35 |
36 | DATA_QUEUE = Queue(-1)
37 |
38 | formatter = '[%(asctime)s][%(filename)s:%(lineno)s][%(levelname)s][%(message)s]'
39 |
40 | LOGGING_CONFIG = {
41 | 'version': 1,
42 | 'disable_existing_loggers': False, # this fixes the problem
43 |
44 | 'formatters': {
45 | 'standard': {
46 | 'format': formatter,
47 | },
48 | },
49 | 'handlers': {
50 | 'default': {
51 | 'level': 'DEBUG',
52 | 'class': 'logging.StreamHandler',
53 | 'formatter': 'standard'
54 | },
55 | "debug_file_handler": {
56 | "class": "logging.handlers.TimedRotatingFileHandler",
57 | "level": "DEBUG",
58 | "formatter": "standard",
59 | "filename": "./logs/lidar.log",
60 | "when": "D",
61 | "interval": 1,
62 | "backupCount": 30,
63 | "encoding": "utf8"
64 | },
65 | },
66 | 'loggers': {
67 | '': {
68 | 'handlers': ["default", 'debug_file_handler'],
69 | 'level': 'DEBUG',
70 | 'propagate': False
71 | },
72 | }
73 | }
74 |
75 | logging.config.dictConfig(LOGGING_CONFIG)
76 | logger = logging.getLogger("")
77 |
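# Note: TimedRotatingFileHandler opens ./logs/lidar.log immediately and does
# not create missing directories, so ./logs must exist before dictConfig()
# runs. A guard such as the following, placed above the dictConfig() call,
# would avoid the startup error:
#   if not os.path.isdir('./logs'):
#       os.makedirs('./logs')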
78 | def save_csv(path, data):
79 | with open(path, 'w') as fp:
80 | wr = csv.writer(fp, delimiter=',')
81 | wr.writerows(data)
82 |
83 | def calc(dis, azimuth, laser_id, timestamp):
84 | R = dis * DISTANCE_RESOLUTION
85 | omega = LASER_ANGLES[laser_id] * np.pi / 180.0
86 | alpha = azimuth / 100.0 * np.pi / 180.0
87 | X = R * np.cos(omega) * np.sin(alpha)
88 | Y = R * np.cos(omega) * np.cos(alpha)
89 | Z = R * np.sin(omega)
90 | return [X, Y, Z, timestamp]
91 |
92 | def unpack(dirs):
93 | files = glob.glob(dirs + '/*.bin')
94 | points = []
95 | scan_index = 0
96 | prev_azimuth = None
97 | for x in files:
98 | d = open(x, 'rb').read()
99 | n = len(d)
100 | for offset in xrange(0, n, 1223):
101 | ts = d[offset : offset + 17]
102 | data = d[offset + 17 : offset + 1223]
103 | print (ts, len(data))
104 |             timestamp, factory = struct.unpack_from("<IH", data, offset=1200)
105 |             assert factory >> 8 == 0x22, hex(factory)  # 0x22 = VLP-16
106 |             for offset2 in xrange(0, 1200, 100):
107 |                 flag, azimuth = struct.unpack_from("<HH", data, offset2)
108 |                 assert flag == 0xEEFF, hex(flag)
109 |                 for step in xrange(2):
110 |                     seq_azimuth = (azimuth + step) % ROTATION_MAX_UNITS
111 |                     if prev_azimuth is not None and seq_azimuth < prev_azimuth:
112 |                         # azimuth wrapped past zero: one full scan collected
113 |                         save_csv(os.path.join(dirs, 'scan_%06d.csv' % scan_index),
114 |                                  points)
115 |                         logger.info('scan %d: %d points', scan_index, len(points))
116 |                         points = []
117 |                         scan_index += 1
118 |                     prev_azimuth = seq_azimuth
119 |                     # per channel: H = distance (2 mm units), B = reflectivity
120 |                     arr = struct.unpack_from('<' + 'HB' * NUM_LASERS,
121 |                                              data, offset2 + 4 + step * 48)
122 |                     for i in xrange(NUM_LASERS):
123 |                         if arr[i * 2] > 0:
124 |                             points.append(calc(arr[i * 2], seq_azimuth, i, timestamp))
125 |
126 | def save_package(dirs, data_queue):
127 |     # each record is a 17-byte ascii timestamp ('%.6f' of the epoch time)
128 |     # followed by the 1206-byte packet: the 1223-byte stride unpack() reads
129 |     if not os.path.exists(dirs):
130 |         os.makedirs(dirs)
131 |     fp = open(os.path.join(dirs, datetime.now().strftime('%H%M_%S%f') + '.bin'),
132 |               'ab')
133 |     try:
134 |         while True:
135 |             msg = data_queue.get()
136 |             fp.write('%.6f' % msg['time'])
137 |             fp.write(msg['data'])
138 |     except KeyboardInterrupt:
139 |         fp.close()
140 |
141 | def capture(port, data_queue):
142 |     soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
143 |     soc.bind(('', port))
144 |     try:
145 |         while True:
146 |             try:
147 |                 data = soc.recv(2000)
148 |                 if len(data) > 0:
149 |                     assert len(data) == 1206, len(data)
150 |                     data_queue.put({'data': data, 'time': time.time()})
151 |             except Exception:
152 |                 pass
153 |                 # print (dir(e), e.message, e.__class__.__name__)
154 |                 # traceback.print_exc(e)
155 |     except KeyboardInterrupt:
156 |         pass
157 |         #print (e)
191 |
192 | if __name__ == "__main__":
193 | if len(sys.argv) < 3:
194 | print (__doc__)
195 | sys.exit(2)
196 | if sys.argv[1] == 'read':
197 | top_dir = datetime.now().strftime('%Y-%m-%d_%H%M%S')
198 | processA = Process(target = capture, args = (PORT, DATA_QUEUE))
199 | processA.start()
200 | processB = Process(target = save_package, args = (sys.argv[2] + '/' + top_dir, DATA_QUEUE))
201 | processB.start()
202 | else:
203 | unpack(sys.argv[2])
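# Example invocation (directory names are placeholders):
#   python vlp16.py read ./captures
#       -> spawns capture() and save_package(); raw packets accumulate under
#          ./captures/<launch timestamp>/ as .bin records
#   python vlp16.py unpack ./captures/2018-01-01_120000
#       -> parses the saved records into per-scan CSV point clouds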
--------------------------------------------------------------------------------
/yolo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from __future__ import print_function
4 | import socket
5 | import struct
6 | import binascii
7 | import array
8 | import numpy as np
9 | import time
10 | #import cv2
11 |
12 | TCP_IP = '127.0.0.1'
13 | TCP_PORT = 8080
14 | BUFFER_SIZE = 1024
15 |
16 | def init_yolo_socket():
17 |     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
18 |     s.connect((TCP_IP, TCP_PORT))
19 |     return s
20 | def get_bounding_boxes(s):
21 | objects = []
22 | while 1:
23 | data = s.recv(BUFFER_SIZE)
24 | #print('yolo size: ', len(data))
25 | st = struct.Struct('=IIQi250i')
26 | try:
27 | packet = st.unpack(data)
28 |         except struct.error:
29 |             continue  # short or malformed read; wait for the next packet
30 | #print("Start")
31 | #print('received packet #', packet[0])
32 | timestamp = packet[2]
33 | numObjects = packet[3]
34 | #print('Timestamp: ', timestamp, 'NumObjects: ', numObjects)
35 | for i in range(numObjects):
36 | index = 5*i
37 | left = packet[index+4]
38 | right = packet[index+5]
39 | top = packet[index+6]
40 | bottom = packet[index+7]
41 | object_type = packet[index+8]
42 | objects.append([left,right,top,bottom,object_type])
43 | #print(left, right, top, bottom)
44 | #print("End")
45 | break
46 | return timestamp, numObjects, objects
47 |
48 |
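# Wire format implied by struct.Struct('=IIQi250i') above (1020 bytes, native
# byte order with standard packed sizes): uint32 packet counter, uint32
# (unused), uint64 timestamp, int32 numObjects, then 250 x int32 holding up
# to 50 objects x 5 fields (left, right, top, bottom, object_type) starting
# at packet[4]. BUFFER_SIZE = 1024 just covers one packet; a short recv()
# makes unpack() raise struct.error, which get_bounding_boxes() retries.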
49 | def close_yolo_socket(s):
50 |     s.close()
51 |
52 | if __name__ == "__main__":
53 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
54 | s.connect((TCP_IP, TCP_PORT))
55 | timestamps = []
56 | while 1:
57 | timestamp, numObjects, objects = get_bounding_boxes(s)
58 | for i in range(numObjects):
59 | left = objects[i][0]
60 | right = objects[i][1]
61 | top = objects[i][2]
62 | bottom = objects[i][3]
63 |             print((left+right)/2.0, (top+bottom)/2.0)  # box center (x, y)
64 | print('\n')
65 |
--------------------------------------------------------------------------------