├── D3QN_V3.py
├── ENV.py
├── README.md
├── Test_D3QN .py
├── Test_D3QN.py
├── model_editor_models
│   ├── DQN_world_0
│   │   ├── model.config
│   │   └── model.sdf
│   ├── DQN_world_1
│   │   ├── model.config
│   │   └── model.sdf
│   ├── DQN_world_2
│   │   ├── model.config
│   │   └── model.sdf
│   ├── DQN_world_3
│   │   ├── model.config
│   │   └── model.sdf
│   ├── DQN_world_4
│   │   ├── model.config
│   │   └── model.sdf
│   ├── MW_world_1
│   │   ├── model.config
│   │   └── model.sdf
│   ├── MW_world_1_0
│   │   ├── model.config
│   │   └── model.sdf
│   ├── MW_world_1_0_0
│   │   ├── model.config
│   │   └── model.sdf
│   ├── door
│   │   ├── model.config
│   │   └── model.sdf
│   ├── engineering_1F
│   │   ├── model.config
│   │   └── model.sdf
│   ├── engineering_2F
│   │   ├── model.config
│   │   └── model.sdf
│   ├── engineering_815
│   │   ├── model.config
│   │   └── model.sdf
│   ├── grey_wall_mw
│   │   ├── model.config
│   │   └── model.sdf
│   ├── image_acquisition_map
│   ├── indoor
│   │   ├── model.config
│   │   └── model.sdf
│   ├── indoor_2
│   │   ├── model.config
│   │   └── model.sdf
│   ├── indoor_3
│   │   ├── model.config
│   │   └── model.sdf
│   ├── indoor_environment
│   │   ├── model.config
│   │   └── model.sdf
│   ├── indoor_environment_4
│   │   ├── model.config
│   │   └── model.sdf
│   ├── mw_5
│   │   ├── model.config
│   │   └── model.sdf
│   ├── square_empty
│   │   ├── model.config
│   │   └── model.sdf
│   ├── test_map
│   │   ├── model.config
│   │   └── model.sdf
│   ├── test_map_2
│   │   ├── model.config
│   │   └── model.sdf
│   └── test_map_3
│       ├── model.config
│       └── model.sdf
├── path trajectory
│   ├── MW_world_gazebo.jpg
│   ├── MW_world_rviz.png
│   ├── indoor_gazebo.jpg
│   ├── indoor_gazebo.png
│   ├── test_map.jpg
│   ├── test_map2.jpg
│   ├── test_map2_gazebo.png
│   ├── test_map2_gazebo2.png
│   ├── test_map3_gazebo3.png
│   └── test_map_gazebo.png
├── real_world_test
│   ├── D3QN_test.py
│   ├── ENV.py
│   ├── Saved_models
│   │   ├── D3QN_V_17_single.h5
│   │   └── readme.md
│   ├── draw_control_input.py
│   ├── drone_control.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-35.pyc
│   │   │   ├── fcrn.cpython-35.pyc
│   │   │   └── network.cpython-35.pyc
│   │   ├── fcrn.py
│   │   └── network.py
│   └── readme.md
├── save_model
│   ├── D3QN_V3.h5
│   ├── D3QN_V_3_single.h5
│   ├── est_model_V1 (1).h5
│   └── est_model_V1.json
└── training env
    ├── default_gzclient_camera(1)-2020-01-20T23_49_03.550191.jpg
    ├── default_gzclient_camera(1)-2020-01-20T23_49_41.711470.jpg
    ├── default_gzclient_camera(1)-2020-01-20T23_50_14.533198.jpg
    ├── default_gzclient_camera(1)-2020-01-20T23_50_52.218574.jpg
    ├── default_gzclient_camera(1)-2020-01-20T23_51_25.348340.jpg
    └── default_gzclient_camera(1)-2020-01-20T23_51_43.545469.jpg
/D3QN_V3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import warnings
5 | warnings.simplefilter(action='ignore', category=FutureWarning)
6 |
7 | from ENV import ENV
8 | from keras import backend as K
9 | from keras.optimizers import RMSprop, Adam
10 | from keras.layers.convolutional import Conv2D
11 | from keras.utils.training_utils import multi_gpu_model
12 | from keras.models import Sequential ,load_model, Model
13 | from keras.backend.tensorflow_backend import set_session
14 | from keras.layers import Input, Dense, Flatten, Lambda, add
15 | from sensor_msgs.msg import LaserScan, Image, Imu
16 | from gazebo_msgs.msg import ModelState, ModelStates
17 | from gazebo_msgs.srv import SetModelState
18 | from geometry_msgs.msg import Vector3Stamped, Twist
19 | from skimage.transform import resize
20 | from skimage.color import rgb2gray
21 | from collections import deque
22 | from PIL import Image as iimage
23 |
24 | import matplotlib
25 | import matplotlib.pyplot as plt
26 | import rospy
27 | import tensorflow as tf
28 | import scipy.misc
29 | import numpy as np
30 | import random
31 | import time
33 | import pickle
34 | import models
35 | import cv2
36 | import copy
37 |
38 | # load depth estimation model
39 | with tf.name_scope("predict"):
40 | height = 228
41 | width = 304
42 | channels = 3
43 | batch_size = 1
44 | input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
45 | model_data_path = './NYU_FCRN-checkpoint/NYU_FCRN.ckpt'
46 |
47 | print('start create the session and model')
48 | net = models.ResNet50UpProj({'data': input_node}, batch_size, 1, False)
49 | config = tf.ConfigProto()
50 | cnn_sess = tf.Session(config=config)
51 | cnn_saver = tf.train.Saver()
52 | cnn_saver.restore(cnn_sess, model_data_path)
53 | print('Finished')
54 |
55 | laser = None
56 | velocity = None
57 | vel = None
58 | theta = None
59 | pose = None
60 | orientation = None
61 | image = None
62 | depth_img = None
63 |
64 | def DepthCallBack(img):
65 | global depth_img
66 | depth_img = img.data
67 |
68 | def callback_laser(msg):
69 | global laser
70 | laser = msg
71 | laser = laser.ranges
72 |
73 | def callback_camera(msg):
74 | global image
75 | image = np.frombuffer(msg.data, dtype=np.uint8)
76 | image = np.reshape(image, [480,640,3])
77 | image = np.array(image)
78 |
79 | def GetDepthObservation(image):
80 | width = 304
81 | height = 228
82 |
83 | test_image = iimage.fromarray(image,'RGB')
84 | test_image = test_image.resize([width, height], iimage.ANTIALIAS)
85 | test_image = np.array(test_image).astype('float32')
86 | test_image = np.expand_dims(np.asarray(test_image), axis = 0)
87 | pred = cnn_sess.run(net.get_output(), feed_dict={input_node: test_image})
88 | pred = np.reshape(pred, [128,160])
89 | pred = np.array(pred, dtype=np.float32)
90 |
91 | pred[np.isnan(pred)] = 5.
92 | pred = pred / 3.5
93 | pred[pred > 1.0] = 1.0
94 |
95 | return pred
96 |
102 | def crash_check(laser_data, velocity, theta, delta_depth):
103 | laser_sensor = np.array(laser_data)
104 | laser_index = np.isinf(laser_sensor)
105 | laser_sensor[laser_index] = 30
106 | laser_sensor = np.array(laser_sensor[300:800])
107 | done = False
108 | vel_flag = False
109 | zone_1_flag = False
110 |
111 | crash_reward = 0
112 | depth_reward = 0
113 | vel_reward = 0
114 | depth_value = (np.min(laser_sensor) - 0.5) / 2.0
115 |
116 | # reward for zone 1
117 | if depth_value >= 0.4:
118 | depth_reward = 0.4
119 | vel_flag = True
120 |
121 | # reward for zone 2
122 | else:
123 | vel_factor = np.absolute(np.cos(velocity))
124 | _depth_reward = depth_value * vel_factor + delta_depth
125 | depth_reward = np.min([0.4, _depth_reward])
126 | vel_flag = False
127 |
128 | # reward for crash
129 | if np.min(laser_sensor) <= 0.6:
130 | done = True
131 | vel_flag = False
132 |
133 | # reward for velocity
134 | else:
135 | if vel_flag:
136 | vel_reward = velocity * np.cos(theta)* 0.2
137 |
138 | else:
139 | vel_reward = 0
140 | # select reward
141 | if done:
142 | reward = -1.0
143 | else:
144 | reward = depth_reward + vel_reward
145 |
146 | return done, reward, np.min(laser_sensor), depth_value
147 |
148 | def depth_change(depth,_depth):
149 | laser = depth # current depth
150 | _laser = _depth # previous depth
151 | eta = 0.2
152 |
153 | delta_depth = eta * np.sign(laser - _laser)
154 | return delta_depth
155 |
156 | def show_figure(image):
157 | #show image using cv2
158 | #image = cv2.resize(image, (256*2, 320*2), interpolation=cv2.INTER_CUBIC)
159 | cv2.imshow('input image', image)
160 | cv2.waitKey(1)
161 |
162 |
163 | def state_callback(msg):
164 | global velocity, pose, orientation, vel, theta
165 | idx = msg.name.index("quadrotor")
166 |
167 | pose = msg.pose[idx].position
168 | orientation = msg.pose[idx].orientation
169 | vel = msg.twist[idx]
170 |
171 | velocity_x = vel.linear.x
172 | velocity_y = vel.linear.y
173 | velocity = np.sqrt(velocity_x**2 + velocity_y**2)
174 | theta = vel.angular.z
175 |
176 | class D3QNAgent:
177 | def __init__(self, action_size):
178 | self.render = False
179 | self.load_model = False
180 | # state size and size of the action space
181 | self.state_size = (128, 160 , 6)
182 | self.action_size = action_size
183 | # D3QN hyperparameters
184 | self.epsilon = 1.0
185 | self.epsilon_start, self.epsilon_end = 1.0, 0.0001
186 | self.exploration_steps = 100000
187 | self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) / self.exploration_steps
188 | self.batch_size = 16
189 | self.update_target_rate = 10000
190 | self.discount_factor = 0.99
191 | # Replay memory size, maximum: 50000
192 | # This can be changed depending on the RAM size of your computer
193 | self.memory = deque(maxlen=50000)
194 | self.no_op_steps = 5
195 | # Target network initialization
196 | # Build the model with multi-GPU; to train on a single GPU, use the two commented lines below instead
197 | # self.model = self.build_model()
198 | # self.target_model = self.build_model()
199 |
200 | self.model = multi_gpu_model(self.build_model(), gpus = 2)
201 | self.target_model = multi_gpu_model(self.build_model(), gpus = 2)
202 | self.update_target_model()
203 | self.optimizer = self.optimizer()
204 |
205 | # Tensorboard settings
206 | self.config = tf.ConfigProto()
207 | self.sess = tf.InteractiveSession(config=self.config)
208 | self.sess.run(tf.global_variables_initializer())
209 | K.set_session(self.sess)
210 |
211 | self.avg_q_max, self.avg_loss = 0, 0
212 | self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()
213 | self.summary_writer = tf.summary.FileWriter('summary/D3QN', self.sess.graph)
214 |
215 | if self.load_model:
216 | self.model.load_weights("./save_model/D3QN.h5")
217 |
218 |
219 | # Defining optimizer to utilize Huber Loss function
220 | def optimizer(self):
221 | a = K.placeholder(shape=(None,), dtype='int32')
222 | y = K.placeholder(shape=(None,), dtype='float32')
223 |
224 | prediction = self.model.output
225 |
226 | a_one_hot = K.one_hot(a, self.action_size)
227 | q_value = K.sum(prediction * a_one_hot, axis=1)
228 | error = K.abs(y - q_value)
229 |
230 | quadratic_part = K.clip(error, 0.0, 1.0)
231 | linear_part = error - quadratic_part
232 | loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
233 |
234 | optimizer = Adam(lr=0.0001, epsilon=0.01)
235 | updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
236 |
237 | train = K.function([self.model.input, a, y], [loss], updates=updates)
238 |
239 | return train
240 |
241 |
242 | # approximate Q function using Convolution Neural Network
243 | # state is input and Q Value of each action is output of network
244 | # dueling network's Q Value is sum of advantages and state value
245 | def build_model(self):
246 | input = Input(shape=self.state_size)
247 | h1 = Conv2D(32, (8, 8), strides = (8,8), activation = "relu", name = "conv1")(input)
248 | h2 = Conv2D(64, (4, 4), strides = (2,2), activation = "relu", name = "conv2")(h1)
249 | h3 = Conv2D(64, (3, 3), strides = (1,1), activation = "relu", name = "conv3")(h2)
250 | context = Flatten(name = "flatten")(h3)
251 |
252 | value_hidden = Dense(512, activation = 'relu', name = 'value_fc')(context)
253 | value = Dense(1, name = "value")(value_hidden)
254 | action_hidden = Dense(512, activation = 'relu', name = 'action_fc')(context)
255 | action = Dense(self.action_size, name = "action")(action_hidden)
256 | action_mean = Lambda(lambda x: tf.reduce_mean(x, axis = 1, keepdims = True), name = 'action_mean')(action)
257 | output = Lambda(lambda x: x[0] + x[1] - x[2], name = 'output')([action, value, action_mean])
258 | model = Model(inputs = input, outputs = output)
259 | model.summary()
260 |
261 | return model
262 | # Update target model
263 | def update_target_model(self):
264 | self.target_model.set_weights(self.model.get_weights())
265 |
266 | # Action selection through epsilon-greedy search
267 | def get_action(self, history):
268 | flag = False
269 | history = np.float32(history)
270 | if np.random.rand() <= self.epsilon:
271 | flag = True
272 | return random.randrange(self.action_size), flag
273 | else:
274 | flag = False
275 | q_value = self.model.predict(history)
276 | return np.argmax(q_value[0]), flag
277 |
278 | # Store samples in the order of [s,a,r,s'] into the replay memory
279 | def append_sample(self, history, action, reward, next_history, deads):
280 | self.memory.append((history, action, reward, next_history, deads))
281 |
282 | # Train the model using randomly sampled transitions from the replay memory
283 | def train_model(self):
284 | if self.epsilon > self.epsilon_end:
285 | self.epsilon -= self.epsilon_decay_step
286 |
287 | mini_batch = random.sample(self.memory, self.batch_size)
288 |
289 | history = np.zeros((self.batch_size, self.state_size[0],
290 | self.state_size[1], self.state_size[2]))
291 | next_history = np.zeros((self.batch_size, self.state_size[0],
292 | self.state_size[1], self.state_size[2]))
293 | target = np.zeros((self.batch_size,))
294 | action, reward, deads = [], [], []
295 |
296 | for i in range(self.batch_size):
297 | history[i] = np.float32(mini_batch[i][0])
298 | next_history[i] = np.float32(mini_batch[i][3])
299 | action.append(mini_batch[i][1])
300 | reward.append(mini_batch[i][2])
301 | deads.append(mini_batch[i][4])
302 |
303 | value = self.model.predict(next_history)
304 | target_value = self.target_model.predict(next_history)
305 |
306 | # like Q Learning, get maximum Q value at s'
307 | # But from target model
308 |
309 | for i in range(self.batch_size):
310 | if deads[i]:
311 | target[i] = reward[i]
312 | else:
313 | # the key point of Double DQN
314 | # selection of action is from model
315 | # update is from target model
316 |
317 | target[i] = reward[i] + self.discount_factor * target_value[i][np.argmax(value[i])]
318 |
319 | loss = self.optimizer([history, action, target])
320 | self.avg_loss += loss[0]
321 |
322 | # store the reward and Q-value on every episode
323 | def setup_summary(self):
324 | episode_total_reward = tf.Variable(0.)
325 | episode_avg_max_q = tf.Variable(0.)
326 | episode_duration = tf.Variable(0.)
327 | episode_avg_loss = tf.Variable(0.)
328 |
329 | tf.summary.scalar('Total_Reward/Episode', episode_total_reward)
330 | tf.summary.scalar('Average_Max_Q/Episode', episode_avg_max_q)
331 | tf.summary.scalar('Duration/Episode', episode_duration)
332 | tf.summary.scalar('Average_Loss/Episode', episode_avg_loss)
333 |
334 | summary_vars = [episode_total_reward, episode_avg_max_q,
335 | episode_duration, episode_avg_loss]
336 | summary_placeholders = [tf.placeholder(tf.float32) for _ in
337 | range(len(summary_vars))]
338 | update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in
339 | range(len(summary_vars))]
340 | summary_op = tf.summary.merge_all()
341 | return summary_placeholders, update_ops, summary_op
342 |
343 | if __name__ == '__main__':
344 | # Check the gazebo connection
345 | rospy.init_node('env', anonymous=True)
346 |
347 | # Class define
348 | env = ENV()
349 | # Parameter setting for the simulation
350 | agent = D3QNAgent(action_size = 8)
351 | EPISODE = 100000
352 | global_step = 0
353 | # Observe
354 | rospy.Subscriber('/camera/rgb/image_raw', Image, callback_camera,queue_size = 5)
355 | rospy.Subscriber("/scan", LaserScan, callback_laser,queue_size = 5)
356 | rospy.Subscriber('gazebo/model_states', ModelStates, state_callback, queue_size= 5)
357 | rospy.Subscriber('/camera/depth/image_raw', Image, DepthCallBack,queue_size = 5)
358 |
359 | # define command step
360 | rospy.sleep(2.)
361 | rate = rospy.Rate(5)
362 |
363 | # define episode and image_steps
364 | e = 0
365 | image_steps = 0
366 | collision_steps = 0
367 | env.reset_sim(pose, orientation)
368 |
369 | while e < EPISODE and not rospy.is_shutdown():
370 | e = e + 1
371 | env.reset_sim(pose, orientation)
372 | # get the initial state
373 | state = GetDepthObservation(image)
374 | history = np.stack((state, state, state, state, state, state), axis = 2)
375 | history = np.reshape([history], (1,128,160,6))
376 |
377 | laser_distance = np.zeros(2)  # [current, previous] minimum laser distance
378 | delta_depth = 0
379 | step, score = 0., 0.
380 | done = False
381 |
382 | while not done and not rospy.is_shutdown():
383 | # wait for service
384 | rospy.wait_for_message('/camera/rgb/image_raw', Image)
385 | rospy.wait_for_message('/camera/depth/image_raw', Image)
386 | rospy.wait_for_message('/gazebo/model_states', ModelStates)
387 | rospy.wait_for_message('/scan', LaserScan)
388 |
389 | global_step = global_step + 1
390 | step = step + 1
391 |
392 | # Receive the action command from the Q-network and do the action
393 | [action, flag] = agent.get_action(history)
394 | env.Control(action)
395 |
396 | # Observe: get_reward
397 | [done, reward, _depth, depth_value] = crash_check(laser, velocity, theta, delta_depth)
398 | delta_depth = depth_change(laser_distance[0], laser_distance[1])
399 |
400 | # image preprocessing
401 | next_state = GetDepthObservation(image)
402 | next_distance = _depth
403 |
404 | show_figure(next_state)
405 |
406 | # image for collision check
407 | next_state = np.reshape([next_state],(1,128,160,1))
408 | next_history = np.append(next_state, history[:,:,:,:5],axis = 3)
409 | laser_distance = np.append(next_distance, laser_distance[0])
410 |
411 | # Calculate the average_q_max
412 | agent.avg_q_max += np.amax(agent.model.predict(history)[0])
413 | agent.append_sample(history,action,reward,next_history, done)
414 | history = next_history
415 |
416 | if e >= agent.no_op_steps:
417 | agent.train_model()
418 | if global_step % agent.update_target_rate == 0:
419 | agent.update_target_model()
420 |
421 | if step >= 2000:
422 | done = True
423 |
424 | # Update the score
425 | score += reward
426 | rate.sleep()
427 |
428 | if done:
429 | if e >= agent.no_op_steps:
430 | stats = [score, agent.avg_q_max / float(step), step, agent.avg_loss / float(step)]
431 | for i in range(len(stats)):
432 | agent.sess.run(agent.update_ops[i], feed_dict={agent.summary_placeholders[i]: float(stats[i])})
433 |
434 | summary_str = agent.sess.run(agent.summary_op)
435 | agent.summary_writer.add_summary(summary_str, e - agent.no_op_steps)
436 |
437 | print("episode: " + str(e),
438 | "score: " + str(score),
439 | " memory length: " + str(len(agent.memory)),
440 | "epsilon: " + str(agent.epsilon),
441 | " global_step:"+ str(global_step),
442 | "average_q:" + str(agent.avg_q_max/float(step)),
443 | "average loss:" + str(agent.avg_loss / float(step)),
444 | 'step:' + str(step))
445 |
446 | agent.avg_q_max, agent.avg_loss = 0, 0
447 |
448 | if e % 50 == 0:
449 | agent.model.save_weights("./save_model/D3QN_V3.h5")
450 |
--------------------------------------------------------------------------------
/ENV.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | ###########################################################################
5 | ###########################################################################
6 | from __future__ import print_function
7 | from geometry_msgs.msg import Twist, Pose
8 | from sensor_msgs.msg import LaserScan, Image
9 | from cv_bridge import CvBridge, CvBridgeError
10 | from keras.utils import to_categorical
11 | from gazebo_msgs.srv import SetModelState
12 | from gazebo_msgs.msg import ModelState
13 |
14 | import cv2
15 | import numpy as np
16 | import matplotlib.pyplot as plt
17 | import math
18 | import random
19 | import rospy
20 | import std_msgs.msg
21 | import sys, select, termios, tty
22 | import time
23 | ###########################################################################
24 | ###########################################################################
25 |
26 | class ENV(object):
27 | def __init__(self):
28 | # define gazebo connection and import gazebo msgs
29 | self.cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size = 100)
30 | self.pose_pub = rospy.Publisher('/command/pose', Pose, queue_size = 100)
31 | self.g_set_state = rospy.ServiceProxy('gazebo/set_model_state',SetModelState)
32 |
33 | self.twist = Twist()
34 | self.pose = Pose()
35 | self.action_table = [0.5, 0.25, 0.03, 0.3, -0.3, 0, -0.15, 0.15]  # 3 forward speeds (m/s), 5 yaw rates (rad/s)
36 | self.state = ModelState()
37 | self.state.model_name = 'quadrotor'
38 | self.state.reference_frame = 'world'
39 |
40 |
41 | def stop(self):
42 | self.twist.linear.x = 0; self.twist.linear.y = 0; self.twist.linear.z = 0
43 | self.twist.angular.x = 0; self.twist.angular.y = 0; self.twist.angular.z = 0
44 | self.cmd_pub.publish(self.twist)
45 |
46 | def hovering(self):
47 |
48 | self.state.pose.position.x = 0
49 | self.state.pose.position.y = 0
50 | self.state.pose.position.z = 1.2
51 | self.state.pose.orientation.x = 0
52 | self.state.pose.orientation.y = 0
53 | self.state.pose.orientation.z = 0
54 | self.state.twist.linear.x = 0
55 | self.state.twist.linear.y = 0
56 | self.state.twist.linear.z = 0
57 | self.state.twist.angular.x = 0
58 | self.state.twist.angular.y = 0
59 | self.state.twist.angular.z = 0
60 |
61 | ret = self.g_set_state(self.state)
62 |
63 |
64 | def Control(self,action):
65 | if action < 3:
66 | self.self_speed[0] = self.action_table[action]
67 | self.self_speed[1] = 0
68 | else:
69 | self.self_speed[1] = self.action_table[action]
70 |
71 | self.twist.linear.x = self.self_speed[0]
72 | self.twist.linear.y = 0.
73 | self.twist.linear.z = 0.
74 | self.twist.angular.x = 0.
75 | self.twist.angular.y = 0.
76 | self.twist.angular.z = self.self_speed[1]
77 | self.cmd_pub.publish(self.twist)
78 |
79 | def euler_to_quaternion(self, yaw, pitch, roll):
80 |
81 | qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
82 | qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
83 | qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
84 | qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
85 |
86 | return [qx, qy, qz, qw]
87 |
88 | def quaternion_to_euler(self, x, y, z, w):
89 |
90 | t0 = +2.0 * (w * x + y * z)
91 | t1 = +1.0 - 2.0 * (x * x + y * y)
92 | X = math.degrees(math.atan2(t0, t1))
93 |
94 | t2 = +2.0 * (w * y - z * x)
95 | t2 = +1.0 if t2 > +1.0 else t2
96 | t2 = -1.0 if t2 < -1.0 else t2
97 | Y = math.degrees(math.asin(t2))
98 |
99 | t3 = +2.0 * (w * z + x * y)
100 | t4 = +1.0 - 2.0 * (y * y + z * z)
101 | Z = math.degrees(math.atan2(t3, t4))
102 |
103 | return X, Y, Z
104 |
105 | def reset_sim(self, pose, orientation):
106 | pose_x = pose.x
107 | pose_y = pose.y
108 | pose_z = pose.z
109 |
110 | # spawn_table= [
111 | # [-38.71, 41.93],
112 | # [-27.13, 41.97],
113 | # [-18.66, 41.67],
114 | # [-15.15, 33.80],
115 | # [-36.60, 15.96],
116 | # [-14.69, 13.68]
117 |
118 | # [6.96, 33.80],
119 | # [16.58, 30.71],
120 | # [24.69, 36.19],
121 | # [20.49, 19.59],
122 | # [5.28, 4.63],
123 | # [33.73, 8.40],
124 | # [26.91, 15.03],
125 | # [36.53, 35.03],
126 | # [24.00, 2.85],
127 | # [16.37,5.91]
128 |
129 | # [-27, -10],
130 | # [-34, -20],
131 | # [-25, -25],
132 | # [-14, -15],
133 | # [-5, -18],
134 | # [-12, -5],
135 | # [-32, -35]
136 |
137 | # [12.63, -19.98],
138 | # [24.00, -22.83],
139 | # [27.53, -8.13],
140 | # [18.55, -32.01],
141 | # [26.32, -30.26],
142 | # [34.77, -28.82],
143 | # [6.70, -34.52],
144 | # [3.63, -15.97],
145 | # [4.40, -8.01]
146 | # ]
147 |
148 | # spawn_table = [[-26.48,6.65],[-18.62,-12.41],[-1.54,-22.16],[-19.89,-27]] # map b (indoor)
149 | # spawn_table = [[26.75,30.14],[-25.33,-19.98],[-1.181,30.48],[10.99,-22.30],[19.23,30.83]] #Map test_3
150 | # spawn_table = [[-14.75, -13.34]] # Map test_2
151 | spawn_table = [[0, 0]] # Map test_
152 | # spawn_table = [[-9.10, 34.95]] # Map test_2
153 | # spawn_table = [[-36.20, -3.09], [-24.59, 2.25], [-21.00, -5.13], [-11.37, -0.64], [-2.44, -0.59], [5.04, -3.37], [-0.848, -7.81], [-37.99, 1.96]] # indoor environment_4_1
154 | # spawn_table = [[-26.48,6.65],[-18.62,-12.41],[-1.54,-22.16],[-19.89,-27],[19.49,5.97],[20.45, -10.62],[20.76,-23.48]]
155 | # spawn_table = [[-24.5, -5.37],[-9.53, 9.62],[-5.53,26.08], [-13.54, 20.62], [-23.54, 23.62], [3.46, -1.37]] # map a
156 | # spawn_table = [[0.67, 0.27], [-13.10, 14.02], [-14.38, -14.96], [14.06, -10.86], [29.35, -4.52], [47.32, 7.84], [46.99, -8.37]]
157 |
158 | rand_indx = np.random.randint(len(spawn_table))
159 |
160 | ori_x = orientation.x
161 | ori_y = orientation.y
162 | ori_z = orientation.z
163 | ori_w = orientation.w
164 |
165 | # pos_rand = np.random.randint(-150,150)
166 | # pos_rand = 0
167 |
168 | [yaw, pitch, roll] = self.quaternion_to_euler(ori_x, ori_y, ori_z, ori_w)
169 | [ori_x, ori_y, ori_z, ori_w] = self.euler_to_quaternion(0, 0, 0)
170 |
171 | self.state.pose.position.x = spawn_table[rand_indx][0] + np.random.randint(-1,1)
172 | self.state.pose.position.y = spawn_table[rand_indx][1] + np.random.randint(-1,1)
173 | self.state.pose.position.z = 1.2
174 | self.state.pose.orientation.x = ori_x
175 | self.state.pose.orientation.y = ori_y
176 | self.state.pose.orientation.z = ori_z
177 | self.state.pose.orientation.w = ori_w
178 | self.state.twist.linear.x = 0
179 | self.state.twist.linear.y = 0
180 | self.state.twist.linear.z = 0
181 | self.state.twist.angular.x = 0
182 | self.state.twist.angular.y = 0
183 | self.state.twist.angular.z = 0
184 | self.self_speed = [0.03, 0.0]
185 | ret = self.g_set_state(self.state)
186 | rospy.sleep(0.5)
187 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Collision-avoidance
Towards Monocular Vision Based Collision Avoidance Using Deep Reinforcement Learning.
You can see the algorithm verified in a real environment [here](https://www.youtube.com/watch?v=oSQHCsvuE-8); no distance sensors are used.
The paper can be found [here](https://www.sciencedirect.com/science/article/pii/S0957417422002111?via%3Dihub).

### Overall Network Structure
An RGB image from the monocular camera is converted into a depth image, and the estimated depth image is used as the input to the Dueling Double Deep Q-Network (D3QN).

![image](https://user-images.githubusercontent.com/42955200/172786052-d5f1c17b-872d-4d0c-98da-3ed9b1d654c9.png)
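The dueling head combines a scalar state value with per-action advantages. A minimal NumPy sketch of the aggregation used in the output `Lambda` of `build_model()` in D3QN_V3.py:

```python
import numpy as np

def dueling_q(value, advantages):
    # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)
    return value + advantages - np.mean(advantages, keepdims=True)

# Example with action_size = 8, the action space used in this repo
q = dueling_q(0.5, np.linspace(-0.2, 0.2, 8))
print(int(np.argmax(q)))  # greedy action index
```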
### Depth Estimation
- Tensorflow version == 1.12.0
- The depth estimation model is based on the ResNet-50 architecture
- The Python files that define the model architecture are located in **models**
- Due to the size of the trained depth estimation model, you have to download it from [here](https://github.com/iro-cp/FCRN-DepthPrediction).

To run the code, you need:
```
- fcrn.py
- __init__.py
- network.py
- NYU_FCRN-checkpoint
```
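Restoring the checkpoint mirrors the "load depth estimation model" block at the top of D3QN_V3.py; in this sketch `rgb_batch` is a stand-in for a resized camera frame:

```python
import numpy as np
import tensorflow as tf  # TF 1.12
import models            # fcrn.py / network.py from this repo

input_node = tf.placeholder(tf.float32, shape=(None, 228, 304, 3))
net = models.ResNet50UpProj({'data': input_node}, 1, 1, False)  # batch size 1, inference mode

sess = tf.Session()
tf.train.Saver().restore(sess, './NYU_FCRN-checkpoint/NYU_FCRN.ckpt')

rgb_batch = np.zeros((1, 228, 304, 3), np.float32)  # stand-in for a resized RGB frame
pred = sess.run(net.get_output(), feed_dict={input_node: rgb_batch})  # (1, 128, 160, 1) depth map
```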
### Training Environment in Robot Operating System
- In our setup, **Ubuntu 16.04** and **[ROS Kinetic](http://wiki.ros.org/kinetic/Installation/Ubuntu)** are used
- The **training env** folder contains figures of the training maps in ROS
- You can use the training environments in **model_editor_models**
- Place the editor models in your Gazebo model repository, as shown below

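For a default Gazebo install, the local model repository is `~/.gazebo/models` (adjust this if you use a custom `GAZEBO_MODEL_PATH`):
```
cp -r model_editor_models/* ~/.gazebo/models/
```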
### Training Environment Setup
#### 1. Spawning the drone for training

The training agent for the drone is [hector_quadrotor](http://wiki.ros.org/hector_quadrotor). Please take a look at the ROS description and install it.
To spawn the training agent in our setup, type the command below:
```
roslaunch spawn_quadrotor_with_asus_with_laser.launch
```
To enable the motors, type the command below:
```
rosservice call /enable_motors true
```
#### 2. Setting the initial position and velocity of the agent
You can change the initial position and velocity in ENV.py.
- To change the spawning position of the drone, change the **spawn_table** in ENV.py
- To change the velocity of the drone, change the **action_table** (three linear speeds, five angular rates), as sketched below

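A minimal sketch of how `ENV.Control()` interprets an action index against `action_table` (values copied from ENV.py):

```python
action_table = [0.5, 0.25, 0.03, 0.3, -0.3, 0, -0.15, 0.15]  # from ENV.py

def command_for(action, current_speed):
    # indices 0-2 pick a new forward speed (m/s) and zero the yaw rate;
    # indices 3-7 keep the current forward speed and pick a yaw rate (rad/s)
    if action < 3:
        return action_table[action], 0.0
    return current_speed, action_table[action]

print(command_for(0, 0.03))  # (0.5, 0.0): accelerate straight ahead
print(command_for(4, 0.5))   # (0.5, -0.3): keep speed, turn
```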
### Training
To train the model, run `python3 D3QN_V3.py`. You can change the hyperparameters in D3QN_V3.py; the defaults are listed below.
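For reference, the defaults set in `D3QNAgent.__init__`:

```python
epsilon_start, epsilon_end = 1.0, 0.0001  # linearly annealed over exploration_steps
exploration_steps = 100000
batch_size = 16
update_target_rate = 10000    # steps between target-network syncs
discount_factor = 0.99
replay_memory_maxlen = 50000  # deque maxlen; scale to your RAM
```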
### Testing
- Simulation test: change the trained model's directory, then run `python3 "Test_D3QN .py"`.
- Real-world experiment: go to the **real_world_test** folder and run `D3QN_test.py`; the avoidance topics can be monitored as shown below.

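Test_D3QN.py publishes the selected action and an obstacle flag for the flight controller; you can watch them with standard ROS tooling:
```
rostopic echo /OA_action   # Int8: action index selected by the D3QN policy
rostopic echo /OA_flag     # Bool: obstacle-detected flag from the depth image
```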
### Citation
```
@article{kim2022towards,
  title={Towards monocular vision-based autonomous flight through deep reinforcement learning},
  author={Kim, Minwoo and Kim, Jongyun and Jung, Minjae and Oh, Hyondong},
  journal={Expert Systems with Applications},
  volume={198},
  pages={116742},
  year={2022},
  publisher={Elsevier}
}
```
--------------------------------------------------------------------------------
/Test_D3QN .py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import warnings
5 | warnings.simplefilter(action='ignore', category=FutureWarning)
6 |
7 | from ENV import ENV
8 | from keras import backend as K
9 | from keras.optimizers import RMSprop, Adam
10 | from keras.layers import Input, Dense, Flatten, Lambda, add, Conv2D
11 | from keras.models import Sequential, load_model, Model, model_from_json
12 | from keras.backend.tensorflow_backend import set_session
13 | from keras.utils import to_categorical
14 | from sensor_msgs.msg import LaserScan, Image, Imu
15 | from nav_msgs.msg import Odometry
16 | from gazebo_msgs.msg import ModelState, ModelStates
17 | from geometry_msgs.msg import Vector3Stamped
18 | from skimage.color import rgb2gray
19 | from skimage.transform import resize
20 | from matplotlib import style, gridspec
21 | from collections import deque
22 | from PIL import Image as iimage
23 |
24 | import cv2
25 | import copy
26 | import matplotlib
27 | import matplotlib.pyplot as plt
28 | import models
29 | import numpy as np
30 | import pickle
31 | import random
32 | import rospy
33 | import scipy.misc
34 | import tensorflow as tf
35 | import time
36 |
37 | # load depth estimation model
38 | with tf.name_scope("predict"):
39 |     height = 228
40 |     width = 304
41 |     channels = 3
42 |     batch_size = 1
43 |     input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
44 |     model_data_path = './NYU_FCRN-checkpoint/NYU_FCRN.ckpt'
45 |
46 |     print('start create the session and model')
47 |     net = models.ResNet50UpProj({'data': input_node}, batch_size, 1, False)
48 |     config = tf.ConfigProto()
49 |     cnn_sess = tf.Session(config=config)
50 |     cnn_saver = tf.train.Saver()
51 |     cnn_saver.restore(cnn_sess, model_data_path)
52 |     print('Finished')
53 |
54 | # define empty parameters
55 | laser = None
56 | velocity = None
57 | vel = None
58 | theta = None
59 | pose = None
60 | orientation = None
61 | image = None
62 | depth_img = None
63 |
64 | def callback_laser(msg):
65 |     global laser
66 |     laser = msg.ranges
67 |
68 | def DepthCallBack(img):
69 |     global depth_img
70 |     depth_img = img.data
71 |
72 | def callback_camera(msg):
73 |     global image
74 |     image = np.frombuffer(msg.data, dtype=np.uint8)
75 |     image = np.reshape(image, [480, 640, 3])
76 |     image = np.array(image)
77 |
78 | def state_callback(msg):
79 |     global velocity, pose, orientation, vel, theta
80 |     idx = msg.name.index("quadrotor")
81 |
82 |     pose = msg.pose[idx].position
83 |     orientation = msg.pose[idx].orientation
84 |     vel = msg.twist[idx]
85 |
86 |     velocity_x = vel.linear.x
87 |     velocity_y = vel.linear.y
88 |     velocity = np.sqrt(velocity_x**2 + velocity_y**2)
89 |     theta = vel.angular.z
90 |
91 | def GetDepthObservation(image):
92 |     width = 304
93 |     height = 228
94 |
95 |     test_image = iimage.fromarray(image, 'RGB')
96 |     test_image = test_image.resize([width, height], iimage.ANTIALIAS)
97 |     test_image = np.array(test_image).astype('float32')
98 |     test_image = np.expand_dims(np.asarray(test_image), axis=0)
99 |     pred = cnn_sess.run(net.get_output(), feed_dict={input_node: test_image})
100 |     pred = np.reshape(pred, [128, 160])
101 |     pred = np.array(pred, dtype=np.float32)
102 |
103 |     pred[np.isnan(pred)] = 5.
104 |     pred = pred / 3.5
105 |     pred[pred > 1.0] = 1.0
106 |
107 |     return pred
108 |
109 | def GetKineticDepthObservation(depth_img):
110 |     noise = np.random.random([128, 160]) * 0.5
111 |     a = copy.deepcopy(depth_img)
112 |     a = np.frombuffer(a, dtype=np.float32)
113 |     a = np.reshape(a, [480, 640])
114 |     a = resize(a, [128, 160])
115 |     a = np.array(a)
116 |     a[np.isnan(a)] = 5.  # NOTE: adjust this NaN fill value to your sensor's maximum range
117 |
118 |     dim = [128, 160]
119 |     gauss = np.random.normal(0., 1.0, dim)
120 |     gauss = gauss.reshape(dim[0], dim[1])
121 |     a = np.array(a, dtype=np.float32)
122 |     a = a + gauss
123 |     a[a < 0.00001] = 0.
124 |     a[a > 5.0] = 5.0
125 |     a = a / 5
126 |     a = cv2.GaussianBlur(a, (25, 25), 0)
127 |
128 |     return a
129 |
130 | def crash_check(laser_data, velocity, theta, delta_depth):
131 |     laser_sensor = np.array(laser_data)
132 |     laser_index = np.isinf(laser_sensor)
133 |     laser_sensor[laser_index] = 30
134 |     laser_sensor = np.array(laser_sensor[300:800])
135 |     done = False
136 |     vel_flag = False
137 |     zone_1_flag = False
138 |
139 |     crash_reward = 0
140 |     depth_reward = 0
141 |     vel_reward = 0
142 |     depth_value = (np.min(laser_sensor) - 0.5) / 2.0
143 |
144 |     # reward for zone 1
145 |     if depth_value >= 0.4:
146 |         depth_reward = 0.4
147 |         vel_flag = True
148 |
149 |     # reward for zone 2
150 |     else:
151 |         vel_factor = np.absolute(np.cos(velocity))
152 |         _depth_reward = depth_value * vel_factor + delta_depth
153 |         depth_reward = np.min([0.4, _depth_reward])
154 |         vel_flag = False
155 |
156 |     # reward for crash
157 |     if np.min(laser_sensor) <= 0.6:
158 |         done = True
159 |         vel_flag = False
160 |
161 |     # reward for velocity
162 |     else:
163 |         if vel_flag:
164 |             vel_reward = velocity * np.cos(theta) * 0.2
165 |         else:
166 |             vel_reward = 0
167 |
168 |     # select reward
169 |     if done:
170 |         reward = -1.0
171 |     else:
172 |         reward = depth_reward + vel_reward
173 |
174 |     return done, reward, np.min(laser_sensor), depth_value
175 |
176 | def depth_change(depth, _depth):
177 |     laser = depth    # current depth
178 |     _laser = _depth  # previous depth
179 |     eta = 0.2
180 |
181 |     delta_depth = eta * np.sign(laser - _laser)
182 |     return delta_depth
183 |
184 | def show_figure(image):
185 |     # show image using cv2
186 |     image = cv2.resize(image, (256, 320), interpolation=cv2.INTER_CUBIC)
187 |     cv2.imshow('Prediction image', image)
188 |     cv2.waitKey(1)
189 |
190 | class TestAgent:
191 |     def __init__(self, action_size):
192 |         self.state_size = (128, 160, 6)
193 |         self.action_size = action_size
194 |         self.model = self.build_model()
195 |
196 |     def build_model(self):
197 |         input = Input(shape=self.state_size)
198 |         h1 = Conv2D(32, (8, 8), strides=(8, 8), activation="relu", name="conv1")(input)
199 |         h2 = Conv2D(64, (4, 4), strides=(2, 2), activation="relu", name="conv2")(h1)
200 |         h3 = Conv2D(64, (3, 3), strides=(1, 1), activation="relu", name="conv3")(h2)
201 |         context = Flatten(name="flatten")(h3)
202 |
203 |         value_hidden = Dense(512, activation='relu', name='value_fc')(context)
204 |         value = Dense(1, name="value")(value_hidden)
205 |         action_hidden = Dense(512, activation='relu', name='action_fc')(context)
206 |         action = Dense(self.action_size, name="action")(action_hidden)
207 |         action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keepdims=True), name='action_mean')(action)
208 |         output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])
209 |         model = Model(inputs=input, outputs=output)
210 |         model.summary()
211 |
212 |         return model
213 |
214 |     def get_action(self, history):
215 |         flag = False
216 |         if np.random.random() < 0.001:
217 |             flag = True
218 |             return random.randrange(self.action_size), flag
219 |
220 |         history = np.float32(history)
221 |         q_value = self.model.predict(history)
222 |         return np.argmax(q_value[0]), flag
223 |
224 |     def load_model(self, filename):
225 |         self.model.load_weights(filename)
226 |
227 | if __name__ == '__main__':
228 |     rospy.init_node('env', anonymous=True)
229 |     env = ENV()
230 |
231 |     # Parameter setting for the simulation
232 |     agent = TestAgent(action_size=8)  # class name must differ from the training agent's
233 |     EPISODE = 1000000
234 |     global_step = 0
235 |
236 |     # Observe
237 |     rospy.Subscriber('/camera/depth/image_raw', Image, DepthCallBack, queue_size=10)
238 |     rospy.Subscriber('/camera/rgb/image_raw', Image, callback_camera, queue_size=10)
239 |     rospy.Subscriber("/scan", LaserScan, callback_laser, queue_size=10)
240 |     rospy.Subscriber('gazebo/model_states', ModelStates, state_callback, queue_size=10)
241 |     rospy.sleep(2.)
242 |
243 |     rate = rospy.Rate(5)
244 |     agent.load_model("./save_model/D3QN_V3.h5")
245 |
246 |     e = 0
247 |     while e < EPISODE and not rospy.is_shutdown():
248 |         e = e + 1
249 |         env.reset_sim(pose, orientation)
250 |
251 |         # get the initial state
252 |         state = GetDepthObservation(image)
253 |         history = np.stack((state, state, state, state, state, state), axis=2)
254 |         history = np.reshape([history], (1, 128, 160, 6))
255 |
256 |         laser_distance = np.zeros(2)  # [current, previous] minimum laser distance
257 |         delta_depth = 0
258 |         step, score = 0., 0.
259 |         done = False
260 |
261 |         while not done and not rospy.is_shutdown():
262 |             global_step = global_step + 1
263 |             step = step + 1
264 |
265 |             # get action through the D3QN policy
266 |             [action, flag] = agent.get_action(history)
267 |             env.Control(action)
268 |
269 |             # Observe: get_reward
270 |             [done, reward, _depth, depth_value] = crash_check(laser, velocity, theta, delta_depth)
271 |             delta_depth = depth_change(laser_distance[0], laser_distance[1])
272 |
273 |             # image preprocessing
274 |             next_state = GetDepthObservation(image)
275 |             next_distance = _depth
276 |             show_figure(next_state)
277 |
278 |             # image for collision check
279 |             next_state = np.reshape([next_state], (1, 128, 160, 1))
280 |             next_history = np.append(next_state, history[:, :, :, :5], axis=3)
281 |             laser_distance = np.append(next_distance, laser_distance[0])
282 |
283 |             # store next states
284 |             history = next_history
285 |
286 |             if step >= 2000:
287 |                 done = True
288 |
289 |             # Update the score
290 |             score += reward
291 |             rate.sleep()
292 |
293 |         if done:
294 |             print("This is score " + str(score))
--------------------------------------------------------------------------------
/Test_D3QN.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | import warnings
4 | warnings.simplefilter(action='ignore', category=FutureWarning)
5 |
6 | from collections import deque
7 | from keras import backend as K
8 | from keras.backend.tensorflow_backend import set_session
9 | from keras.layers import Input, Dense, Flatten, Lambda, add, Conv2D
10 | from keras.models import Sequential ,load_model, Model, model_from_json
11 | from keras.optimizers import RMSprop, Adam
12 | from matplotlib import style, gridspec
13 | from skimage.color import rgb2gray
14 | from sensor_msgs.msg import LaserScan, Image, Imu
15 | from skimage.transform import resize
16 | from gazebo_msgs.msg import ModelState, ModelStates
17 | from geometry_msgs.msg import Vector3Stamped
18 | from nav_msgs.msg import Odometry
19 | from PIL import Image as iimage
20 | from tensorflow import Session, Graph
21 |
22 | import cv2
23 | import copy
24 | import matplotlib
25 | import matplotlib.pyplot as plt
26 | import models
27 | import numpy as np
28 | import pickle
29 | import rospy
30 | import random
32 | import scipy.misc
33 | import tensorflow as tf
34 | import time
35 |
36 | from geometry_msgs.msg import TwistStamped
37 | from geometry_msgs.msg import PoseStamped
38 | from std_msgs.msg import Int8
39 | from std_msgs.msg import Float32MultiArray
40 | from std_msgs.msg import Bool
41 |
42 | # load model for collision probability estimation
43 | from keras.models import model_from_json
44 |
45 | # define empty image variable
46 | depth = np.zeros([128,160]).astype('float32')
47 | obs_flag = False
48 |
49 | def callback_camera(msg):
50 | global image
51 | image = np.frombuffer(msg.data, dtype=np.uint8)
52 | image = np.reshape(image, [480,640,3])
53 | image = np.array(image)
54 |
55 |
56 | def show_figure(image):
57 | #show image using cv2
58 | image = cv2.resize(image, (256, 320), interpolation=cv2.INTER_CUBIC)
59 | cv2.imshow('Prediction image', image)
60 | cv2.waitKey(1)
61 |
62 |
63 | def get_depth(img_from_depth_est):
64 | global depth
65 | global obs_flag
66 |
67 | obs_threshold = 0.55
68 |
69 | #np_image = np.frombuffer(img_from_depth_est.data, np.float32)
70 | np_image = img_from_depth_est.data
71 | np_image = np.reshape(np_image, [480, 640])
72 |
73 | # obstacle detecting array
74 | obs_detector_array = copy.deepcopy(np_image[:-100,100:-100])
75 | obs_detector_array[obs_detector_array >= obs_threshold] = 1
76 | obs_detector_array[obs_detector_array < obs_threshold] = 0
77 | cv2.imshow("obs_detect",obs_detector_array)
78 | cv2.waitKey(1)
79 |
80 | detection_threshold = np.average(obs_detector_array)
81 | print(detection_threshold)
82 | if detection_threshold <= 0.94:
83 | obs_flag = True
84 | print("obstacle detected")
85 | else:
86 | obs_flag = False
87 |
88 | pil_image = iimage.fromarray(np.float32(np_image))
89 |
90 | pil_image = pil_image.resize((160, 128), iimage.LANCZOS)
91 | depth = np.array(pil_image)
92 | # show_figure(depth)
93 |
94 |
95 | init = tf.global_variables_initializer()
96 | config = tf.ConfigProto()
97 | config.gpu_options.allow_growth = True
98 | config.gpu_options.per_process_gpu_memory_fraction = 0.3
99 | sess = tf.Session(config=config)
100 | sess.run(init)
101 |
102 |
103 | class TestAgent:
104 | def __init__(self, action_size):
105 | self.state_size = (128, 160, 6)
106 | self.action_size = action_size
107 | self.model = self.build_model()
108 | " Erase the config and tf.initializer when you load another model by keras"
109 |
110 | def build_model(self):
111 | input = Input(shape=self.state_size)
112 | h1 = Conv2D(32, (8, 8), strides = (8,8), activation = "relu", name = "conv1")(input)
113 | h2 = Conv2D(64, (4, 4), strides = (2,2), activation = "relu", name = "conv2")(h1)
114 | h3 = Conv2D(64, (3, 3), strides = (1,1), activation = "relu", name = "conv3")(h2)
115 | context = Flatten(name = "flatten")(h3)
116 |
117 | value_hidden = Dense(512, activation = 'relu', name = 'value_fc')(context)
118 | value = Dense(1, name = "value")(value_hidden)
119 | action_hidden = Dense(512, activation = 'relu', name = 'action_fc')(context)
120 | action = Dense(self.action_size, name = "action")(action_hidden)
121 | action_mean = Lambda(lambda x: tf.reduce_mean(x, axis = 1, keepdims = True), name = 'action_mean')(action)
122 | output = Lambda(lambda x: x[0] + x[1] - x[2], name = 'output')([action, value, action_mean])
123 | model = Model(inputs = input, outputs = output)
124 | model.summary()
125 |
126 | return model
127 |
128 |
129 | def get_action(self, history):
130 |
131 | history = np.float32(history)
132 |
133 | with graph.as_default():
134 | q_value = self.model.predict(history)
135 |
136 | return np.argmax(q_value[0])
137 |
138 | def load_model(self, filename):
139 | self.model.load_weights(filename)
140 | global graph
141 | graph = tf.get_default_graph()
142 |
143 |
144 | if __name__ == '__main__':
145 | rospy.init_node('Avoider', anonymous=True)
146 |
147 | # Parameter setting for the simulation
148 | agent = TestAgent(action_size = 8)  # class name must differ from the training agent's
149 |
150 | # Observe
151 | # Change the rosSubscriber to another ros topic
152 | rospy.Subscriber('/Depth_est', Float32MultiArray, get_depth, queue_size = 10)
153 | OA_action_publisher = rospy.Publisher('/OA_action', Int8, queue_size=10)
154 | OA_flag_publisher = rospy.Publisher('/OA_flag', Bool, queue_size=10)
155 | # rospy.sleep(2.)
156 |
157 | rate = rospy.Rate(5)
158 |
159 | # model for collision avoidance
160 | agent.load_model("./save_model/D3QN_V_3_single.h5")
161 |
162 | # get the initial state
163 | state = depth
164 | history = np.stack((state, state, state, state, state, state), axis = 2)
165 | history = np.reshape([history], (1,128,160,6))
166 |
167 | while not rospy.is_shutdown():
168 |
169 | action = agent.get_action(history)
170 | print(action)
171 |
172 | OA_action_publisher.publish(action)
173 | OA_flag_publisher.publish(obs_flag)
174 |
175 | # image preprocessing
176 | next_state = depth
177 | #show_figure(next_state)
178 |
179 | # image for collision check
180 | next_state = np.reshape([next_state],(1,128,160,1))
181 | next_history = np.append(next_state, history[:,:,:,:5],axis = 3)
182 |
183 | history = next_history
184 | rate.sleep()
185 |
186 |
187 |
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_0/model.config:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <model>
3 |   <name>DQN_world_0</name>
4 |   <version>1.0</version>
5 |   <sdf version="1.6">model.sdf</sdf>
6 |   <author>
7 |     <name></name>
8 |     <email></email>
9 |   </author>
10 |   <description></description>
11 | </model>
12 |
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_0/model.sdf:
--------------------------------------------------------------------------------
[model.sdf: SDF/XML markup lost in extraction; only element values survive. The recoverable values describe a 20 x 20 x 0.1 m ground plane, two 1 x 1 x 9.34 m box obstacles (mass 9.339; ixx = iyy = 68.65, izz = 1.556) at (4.42, 4.54) and (-5.06, -2.97), and two brick_box_3x1x3 walls (model://brick_box_3x1x3/meshes/simple_box.dae) at (-4.06, 4.03) and (4.94, -5.97).]
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_1/model.config:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <model>
3 |   <name>DQN_world_1</name>
4 |   <version>1.0</version>
5 |   <sdf version="1.6">model.sdf</sdf>
6 |   <author>
7 |     <name></name>
8 |     <email></email>
9 |   </author>
10 |   <description></description>
11 | </model>
12 |
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_1/model.sdf:
--------------------------------------------------------------------------------
[model.sdf: SDF/XML markup lost in extraction; only element values survive. As in DQN_world_0, the model has a 20 x 20 x 0.1 m ground plane, two 1 x 1 x 9.34 m box obstacles, and two brick_box_3x1x3 mesh walls, plus four 20.21 x 0.56 x 8.64 m grey wall segments enclosing the arena at roughly (8.87, 13.05), (8.95, -7.52), (-7.58, -8.64), and (13.15, -8.80).]
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_2/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name DQN_world_2, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_3/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name DQN_world_3, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_4/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name DQN_world_4, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/DQN_world_4/model.sdf:
--------------------------------------------------------------------------------
[model.sdf — the XML markup was stripped in this export; only element values survive. Recoverable content: two 20 x 20 x 0.1 ground slabs (the second at pose 0.3671 19.6373); an ambulance mesh (model://ambulance/meshes/ambulance.obj, scale 0.0254) at pose 0.806776 14.17; a hatchback mesh (model://hatchback/meshes/hatchback.obj, scale 0.0254, yaw 1.5708) at pose -11.6329 3.10985; and grey-wall segments (box 20.2056 x 0.556506 x 8.63552) at poses 8.94987 -7.52388, 13.1547 -8.80175, 13.3671 11.1932, -7.16918 11.1435, -7.6329 -8.8994 (several yawed -1.57043), and 9.3671 32.6373, each with the standard ODE surface parameters (mu 0.2, kp 1e+13, soft_cfm 0.01) repeated per collision.]
--------------------------------------------------------------------------------
/model_editor_models/MW_world_1/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name MW_world_1, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/MW_world_1_0/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name MW_world_1_0, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/MW_world_1_0_0/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name MW_world_1_0_0, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/door/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name door, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/door/model.sdf:
--------------------------------------------------------------------------------
[model.sdf — the XML markup was stripped in this export; only element values survive. Recoverable content: a single static door panel (box 3.53441 x 0.017772 x 4.70786) at pose 0.481045 1.72626 2.52716, with the standard ODE surface parameters (mu 0.2, kp 1e+13, soft_cfm 0.01).]
--------------------------------------------------------------------------------
/model_editor_models/engineering_1F/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name engineering_1F, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/engineering_2F/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name engineering_2F, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/engineering_815/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name engineering_815, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/grey_wall_mw/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name grey_wall_mw, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/grey_wall_mw/model.sdf:
--------------------------------------------------------------------------------
[model.sdf — the XML markup was stripped in this export; only element values survive. Recoverable content: a single static grey-wall segment (box 20.2056 x 0.556506 x 8.63552) at pose -8.81665 -2.69351 1.4, with the standard ODE surface parameters (mu 0.2, kp 1e+13, soft_cfm 0.01).]
--------------------------------------------------------------------------------
/model_editor_models/indoor/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name indoor, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/indoor_2/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name indoor_2, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/indoor_3/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name indoor_3, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/indoor_environment/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name indoor_environment, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/indoor_environment_4/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name indoor_environment_4, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/mw_5/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name mw_5, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/square_empty/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name square_empty, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/square_empty/model.sdf:
--------------------------------------------------------------------------------
[model.sdf — the XML markup was stripped in this export; only element values survive. Recoverable content: two 20 x 20 x 0.1 ground slabs (the second at pose 4e-06 20 0) and six grey-wall segments (box 20.2056 x 0.556506 x 8.63552) at poses 9 33, 9 -7.53084, 12.7893 11.2013, -7.52526 11, -7.59528 -9.00001, and 13 -9.00001 (the last four yawed -1.56937), forming a square arena; each wall carries the standard ODE surface parameters (mu 0.2, kp 1e+13, soft_cfm 0.01).]
--------------------------------------------------------------------------------
/model_editor_models/test_map/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name test_map, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/test_map_2/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name test_map_2, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/model_editor_models/test_map_3/model.config:
--------------------------------------------------------------------------------
[model.config — XML markup was stripped in this export. Surviving fields: model name test_map_3, version 1.0, SDF file model.sdf.]
--------------------------------------------------------------------------------
/path trajectory/MW_world_gazebo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/MW_world_gazebo.jpg
--------------------------------------------------------------------------------
/path trajectory/MW_world_rviz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/MW_world_rviz.png
--------------------------------------------------------------------------------
/path trajectory/indoor_gazebo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/indoor_gazebo.jpg
--------------------------------------------------------------------------------
/path trajectory/indoor_gazebo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/indoor_gazebo.png
--------------------------------------------------------------------------------
/path trajectory/test_map.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map.jpg
--------------------------------------------------------------------------------
/path trajectory/test_map2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map2.jpg
--------------------------------------------------------------------------------
/path trajectory/test_map2_gazebo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map2_gazebo.png
--------------------------------------------------------------------------------
/path trajectory/test_map2_gazebo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map2_gazebo2.png
--------------------------------------------------------------------------------
/path trajectory/test_map3_gazebo3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map3_gazebo3.png
--------------------------------------------------------------------------------
/path trajectory/test_map_gazebo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/path trajectory/test_map_gazebo.png
--------------------------------------------------------------------------------
/real_world_test/D3QN_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import warnings
5 | warnings.simplefilter(action='ignore', category=FutureWarning)
6 | import sys
7 | ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
8 | if ros_path in sys.path:
9 | sys.path.remove(ros_path)
10 |
11 | import cv2
12 | sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
13 |
14 |
15 | from keras.layers.convolutional import Conv2D
16 | from keras.layers import Input, Dense, Flatten, Lambda, add
17 | from keras.optimizers import RMSprop, Adam
18 | from keras.models import Sequential, load_model, Model
19 | from keras.backend.tensorflow_backend import set_session
20 | from skimage.color import rgb2gray
21 | from collections import deque
22 | from keras import backend as K
23 | from sensor_msgs.msg import Image
24 | from gazebo_msgs.msg import ModelState, ModelStates
25 | from geometry_msgs.msg import Vector3Stamped, Twist
26 | from skimage.transform import resize
27 | from PIL import Image as iimage
28 | from keras.models import load_model
29 | from keras.utils.training_utils import multi_gpu_model
30 | from ENV import environment
31 | from drone_control import PID
32 | from std_msgs.msg import Empty
33 | from matplotlib import gridspec
34 | from matplotlib.figure import Figure
35 |
36 |
37 | import matplotlib.pyplot as plt
38 | import rospy
39 | import tensorflow as tf
40 | import scipy.misc
41 | import numpy as np
42 | import random
43 | import time
44 | import pickle
45 | import models
46 | import copy
47 |
48 |
49 |
50 |
51 | image = []
52 |
53 | def callback_camera(msg):
54 | global image
55 | img_height = msg.height
56 | img_width = msg.width
57 |
58 | image = np.frombuffer(msg.data, dtype=np.uint8)
59 | image = np.reshape(image, [img_height,img_width,3])
60 | image = np.array(image)
61 |
62 | def speed_plot(a,b):
63 | linear_speed = a
64 | angular_speed = b
65 |
66 | angular_list = (['angular_velocity'])
67 | linear_list = (['linear velocity'])
68 |
69 | x_pos = np.arange(len(angular_list))
70 | y_pos = np.arange(len(linear_list))
71 |
72 | fig = plt.figure(1)
73 | #plot figure with subplots of different sizes
74 | gridspec.GridSpec(6,6)
75 | #set up subplot grid
76 | plt.subplot2grid((6,6), (1,0), colspan=4, rowspan=2)
77 | plt.locator_params(axis = 'x', nbins = 5)
78 | plt.locator_params(axis = 'y', nbins = 5)
79 | plt.title('Angular (rad/s)')
80 | plt.xlabel('value')
81 | plt.barh(y_pos, [-0.6, 0.6], color = [1.0,1.0, 1.0])
82 | plt.barh(y_pos, angular_speed, color = 'r')
83 | plt.yticks([])
84 |
85 | plt.subplot2grid((6,6), (0,5), colspan=1, rowspan= 5)
86 | plt.locator_params(axis = 'x', nbins = 5)
87 | plt.locator_params(axis = 'y', nbins = 5)
88 | plt.title('Linear (m/s)')
89 | plt.xlabel('value')
90 | plt.bar(x_pos, [0, 0.2], color = [1.0, 1.0, 1.0])
91 | plt.bar(x_pos, linear_speed, color = 'b')
92 | plt.xticks([])
93 | fig.savefig('sample.png', dpi=fig.dpi, bbox_inches = 'tight')
94 |
95 |
96 | def realtime_plot_cv2(a,b):
97 | speed_plot(a,b)
98 | img = cv2.imread('sample.png', cv2.IMREAD_COLOR)
99 | cv2.imshow('control_panel', img)
100 | cv2.waitKey(1)
101 |
102 |
103 | # load model
104 | g1 = tf.Graph()
105 | g2 = tf.Graph()
106 |
107 |
108 | with g1.as_default():
109 | height = 228
110 | width = 304
111 | channels = 3
112 | batch_size = 1
113 | input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))
114 |
115 | model_data_path = './NYU_FCRN-checkpoint/NYU_FCRN.ckpt'
116 |
117 | # Construct the network
118 | print('Start creating the session and model')
119 | net = models.ResNet50UpProj({'data': input_node}, batch_size, 1, False)
120 |
121 | cnn_init = tf.global_variables_initializer()
122 | cnn_config = tf.ConfigProto(allow_soft_placement = True)
123 | cnn_sess = tf.Session(config = cnn_config)
124 | cnn_sess.run(cnn_init)
125 | cnn_saver = tf.train.Saver()
126 | cnn_saver.restore(cnn_sess, model_data_path)
127 |
128 | print('Finished')
129 |
130 | def get_depth(image):
131 | width = 304
132 | height = 228
133 |
134 | test_image = iimage.fromarray(image,'RGB')
135 | test_image = test_image.resize([width, height], iimage.ANTIALIAS)
136 | test_image = np.array(test_image).astype('float32')
137 | test_image = np.expand_dims(np.asarray(test_image), axis = 0)
138 | pred = cnn_sess.run(net.get_output(), feed_dict={input_node: test_image})
139 | pred = np.reshape(pred, [128,160])
140 | pred = np.array(pred, dtype=np.float32)
141 |
142 | pred[np.isnan(pred)] = 5.
143 | pred = pred / 3.5
144 | pred[pred>1.0] = 1.0
145 |
146 | return pred
147 |
148 | class TestAgent:
149 | def __init__(self, action_size):
150 | self.state_size = (128, 160 , 8)
151 | self.action_size = action_size
152 | self.model = self.build_model()
153 | self.config = tf.ConfigProto()
154 | self.sess = tf.InteractiveSession(config=self.config)
155 | self.sess.run(tf.global_variables_initializer())
156 | K.set_session(self.sess)
157 |
158 | def build_model(self):
159 | input = Input(shape=self.state_size)
160 | h1 = Conv2D(32, (10, 14), strides = 8, activation = "relu", name = "conv1")(input)
161 | h2 = Conv2D(64, (4, 4), strides = 2, activation = "relu", name = "conv2")(h1)
162 | h3 = Conv2D(64, (3, 3), strides = 1, activation = "relu", name = "conv3")(h2)
163 | context = Flatten(name = "flatten")(h3)
164 |
165 | value_hidden = Dense(512, activation = 'relu', name = 'value_fc')(context)
166 | value = Dense(1, name = "value")(value_hidden)
167 | action_hidden = Dense(512, activation = 'relu', name = 'action_fc')(context)
168 | action = Dense(self.action_size, name = "action")(action_hidden)
169 | action_mean = Lambda(lambda x: tf.reduce_mean(x, axis = 1, keepdims = True), name = 'action_mean')(action)
170 | output = Lambda(lambda x: x[0] + x[1] - x[2], name = 'output')([action, value, action_mean])
171 | model = Model(inputs = input, outputs = output)
172 | model.summary()
173 |
174 | return model
175 |
176 | def get_action(self, history):
177 | flag = False
178 | if np.random.random() < 0.001:
179 | flag = True
180 | return random.randrange(8), flag
181 | history = np.float32(history)
182 | q_value = self.model.predict(history)
183 | return np.argmax(q_value[0]), flag
184 |
185 | def load_model(self, filename):
186 | self.model.load_weights(filename)
187 |
188 | # Initial hovering for 2 seconds while collecting data from the laser and camera
189 | # Receiving the data from the laser and camera
190 | # Checking for a crash with the crash_check function; if a crash occurs, the simulation is reset
191 | with g2.as_default():
192 |
193 | if __name__ == '__main__':
194 | # Check the gazebo connection
195 | "Publisher"
196 | takeoff = rospy.Publisher('/bebop/takeoff',Empty, queue_size= 10)
197 | land = rospy.Publisher('/bebop/land', Empty, queue_size= 10)
198 |
199 | "Subscribe"
200 | # rospy.init_node('D3QN_TEST', anonymous=True)
201 | rospy.Subscriber('/bebop/image_raw', Image, callback_camera, queue_size = 10)
202 |
203 | # Parameter setting for the simulation
204 | agent = TestAgent(action_size = 8) ## class name should be different from the original one
205 | agent.load_model("./Saved_models/D3QN_V_17_single.h5")
206 | EPISODE = 100000
207 | global_step = 0
208 |
209 | env = environment()
210 | # Observe
211 | rospy.sleep(2.)
212 | e = 0
213 | rate = rospy.Rate(5)
214 | vel_pid = PID()
215 |
216 | while e < EPISODE and not rospy.is_shutdown():
217 | takeoff.publish()
218 | rospy.sleep(5)
219 |
220 | e = e + 1
221 | # get the initial state
222 | state = get_depth(image)
223 | history = np.stack((state, state, state, state, state, state, state, state), axis = 2)
224 | history = np.reshape([history], (1,128,160,8))
225 |
226 | step, score = 0., 0.
227 | done = False
228 |
229 | while not done and not rospy.is_shutdown():
230 |
231 | global_step = global_step + 1
232 | step = step + 1
233 | # Receive the action command from the Q-network and do the action
234 | [action, flag] = agent.get_action(history)
235 | # give control_input
236 | [linear, angular] = vel_pid.velocity_control(action)
237 |
238 | # image preprocessing
239 | next_state = get_depth(image)
240 |
241 | # plot real time depth image
242 | aa = cv2.resize(next_state, (160*3, 128*3), interpolation = cv2.INTER_CUBIC)  # cv2 dsize is (width, height); the state is 128 x 160
243 | cv2.imshow('input image', aa)
244 | cv2.waitKey(1)
245 |
246 | #plot real time control input
247 | realtime_plot_cv2(linear, -angular)
248 |
249 | # image for collision check
250 | next_state = np.reshape([next_state],(1,128,160,1))
251 | next_history = np.append(next_state, history[:,:,:,:7],axis = 3)
252 |
253 | history = next_history
254 |
255 | if step >= 2000:
256 | done = True
257 | # Update the score
258 | rate.sleep()
259 |
260 | if done:
261 | print(score)
262 |
--------------------------------------------------------------------------------
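Note on D3QN_test.py above: get_depth maps the FCRN depth prediction into [0, 1] by treating NaNs as 5 m, dividing by a 3.5 m ceiling, and saturating; build_model then combines the value and advantage streams with the dueling rule Q(s, a) = A(s, a) + V(s) - mean_a' A(s, a'). A NumPy sketch of both steps (illustrative stand-in arrays, not repo code):

    import numpy as np

    # depth preprocessing, as in get_depth: metres -> [0, 1] with a 3.5 m ceiling
    pred = np.random.uniform(0.0, 6.0, size=(128, 160)).astype(np.float32)
    pred[np.isnan(pred)] = 5.0         # NaNs treated as far away
    pred = pred / 3.5                  # normalise by the 3.5 m clip range
    pred[pred > 1.0] = 1.0             # saturate beyond 3.5 m

    # dueling combination, as in the 'action_mean' and 'output' Lambda layers
    value = np.array([[0.5]])          # V(s), shape (1, 1)
    advantage = np.random.randn(1, 8)  # A(s, a) for the 8 actions
    q = advantage + value - advantage.mean(axis=1, keepdims=True)
    action = int(q.argmax())           # greedy action, as in get_action
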
/real_world_test/ENV.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[4]:
5 |
6 |
7 | from __future__ import print_function
8 | from geometry_msgs.msg import Twist
9 | from std_msgs.msg import Empty
10 | from drone_control import PID
11 |
12 | import numpy as np
13 | import rospy
14 |
15 |
16 | class environment():
17 | def __init__(self):
18 | self.vel_pid = PID()
19 |
20 | # set the action table
21 | self.cmd_vel = rospy.Publisher('/bebop/cmd_vel',Twist, queue_size=10)
22 | self.twist = Twist()
23 | self.speed = [0.3, 0.0]
24 | self.action_table = [0.1, 0.1, 0.0, 0.2, -0.2, 0.0, -0.10, 0.10]
25 |
26 | def control(self, action):
27 | if action < 3:
28 | self.speed[0] = self.action_table[action]
29 | self.speed[1] = 0.0
30 | self.twist = Twist()
31 | else:
32 | self.speed[1] = self.action_table[action]
33 | # PID.velocity_control expects the action index (see drone_control.py);
34 | # it runs the PD loop, publishes the Twist itself, and returns (u_x, a_z)
35 | [linear_x, angular_z] = self.vel_pid.velocity_control(action)
36 | self.twist.linear.x = linear_x
37 | self.twist.angular.z = angular_z
38 | return self.speed
43 |
--------------------------------------------------------------------------------
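Note on ENV.control above: action indices 0-2 pick a forward speed from the first three action_table entries and zero the yaw command, while indices 3-7 pick a yaw rate. A minimal restatement of that decoding (illustrative function name, not repo code):

    # illustrative re-statement of ENV.control's action decoding
    action_table = [0.1, 0.1, 0.0, 0.2, -0.2, 0.0, -0.10, 0.10]

    def decode(action, speed=None):
        speed = speed if speed is not None else [0.3, 0.0]
        if action < 3:                       # forward-speed actions
            speed[0] = action_table[action]
            speed[1] = 0.0
        else:                                # yaw-rate actions
            speed[1] = action_table[action]
        return speed                         # [linear_x, angular_z]

    print(decode(4))                         # -> [0.3, -0.2]: cruise while yawing
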
/real_world_test/Saved_models/D3QN_V_17_single.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/real_world_test/Saved_models/D3QN_V_17_single.h5
--------------------------------------------------------------------------------
/real_world_test/Saved_models/readme.md:
--------------------------------------------------------------------------------
1 | This directory contains the saved model weights for the collision-avoidance agent.
2 |
--------------------------------------------------------------------------------
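The weights in D3QN_V_17_single.h5 are consumed by D3QN_test.py; a minimal sketch of the loading step (assuming TestAgent from that script is importable):

    # sketch: restore the trained dueling network, as done in D3QN_test.py
    agent = TestAgent(action_size=8)                        # builds the network graph
    agent.load_model("./Saved_models/D3QN_V_17_single.h5")  # Keras load_weights under the hood
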
/real_world_test/draw_control_input.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[13]:
5 |
6 | import warnings
7 | warnings.simplefilter(action='ignore', category=FutureWarning)
8 | import sys
9 | ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
10 | if ros_path in sys.path:
11 | sys.path.remove(ros_path)
12 |
13 | import cv2
14 | sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
15 |
16 | import numpy as np
17 | import matplotlib.pyplot as plt
18 | from matplotlib import gridspec
19 | from matplotlib.figure import Figure
20 | from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
21 | import matplotlib.animation as animation
22 |
23 | def speed_plot(a,b):
24 | # linear_speed = velocity_command.linear.x
25 | # angular_speed = velocity_command.angualr.z
26 | linear_speed = a
27 | angular_speed = b
28 |
29 | angular_list = (['angular_velocity'])
30 | linear_list = (['linear velocity'])
31 |
32 | x_pos = np.arange(len(angular_list))
33 | y_pos = np.arange(len(linear_list))
34 |
35 | fig = plt.figure(1)
36 | #plot figure with subplots of different sizes
37 | gridspec.GridSpec(6,6)
38 | #set up subplot grid
39 | plt.subplot2grid((6,6), (1,0), colspan=4, rowspan=2)
40 | plt.locator_params(axis = 'x', nbins = 5)
41 | plt.locator_params(axis = 'y', nbins = 5)
42 | plt.title('Angular (rad/s)')
43 | plt.xlabel('value')
44 | plt.barh(y_pos, [-0.5, 0.5], color = [1.0,1.0, 1.0])
45 | plt.barh(y_pos, angular_speed, color = 'r')
46 | plt.yticks([])
47 |
48 | plt.subplot2grid((6,6), (0,5), colspan=1, rowspan= 5)
49 | plt.locator_params(axis = 'x', nbins = 5)
50 | plt.locator_params(axis = 'y', nbins = 5)
51 | plt.title('Linear (m/s)')
52 | plt.xlabel('value')
53 | plt.bar(x_pos, [0, 0.5], color = [1.0, 1.0, 1.0])
54 | plt.bar(x_pos, linear_speed, color = 'b')
55 | plt.xticks([])
56 | fig.savefig('sample.png', dpi=fig.dpi, bbox_inches = 'tight')
57 |
58 |
59 | def realtime_plot_cv2(a,b):
60 | speed_plot(a,b)
61 | img = cv2.imread('sample.png', cv2.IMREAD_COLOR)
62 | cv2.imshow('control_panel', img)
63 | cv2.waitKey(1)
64 |
65 | k = 0.01
66 | while(True):
67 | realtime_plot_cv2(1 + k,-k)
68 | k = k - 0.01
69 |
70 |
--------------------------------------------------------------------------------
/real_world_test/drone_control.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | import rospy
8 | import numpy as np
9 | from std_msgs.msg import Empty
10 | from nav_msgs.msg import Odometry
11 | from geometry_msgs.msg import Twist
12 |
13 | class PID():
14 | def __init__(self):
15 | # init_node
16 | rospy.init_node('sample', anonymous=False)
17 | self._error = 0.0 # previous error term
18 | self._integral = 0.0 # previous integral term
19 | self.delta_t = 0.2 # command frequency
20 | self._velocity_x = 0.0
21 | self._velocity_y = 0.0
22 | self._velocity_z = 0.0
23 | self._angular_x = 0.0
24 | self._angular_y = 0.0
25 | self._angular_z = 0.0
26 |
27 | # publisher
28 | self.cmd_vel = rospy.Publisher('/bebop/cmd_vel',Twist, queue_size=10)
29 | self.twist = Twist()
30 | # subscribe velocity topics
31 | self.velocity = rospy.Subscriber('/bebop/odom', Odometry, self.velocity_callback)
32 |
33 | #action table
34 | self.speed = [0.1, 0.0]
35 | self.action_table = [0.4, 0.1, 0.0, 0.3, -0.3, 0.0, -0.15, 0.15]
36 | def get_velocity(self):
37 | return [self.velocity_x, self.velocity_y]
38 |
39 | def velocity_control(self, action):
40 | desired_control = self.action_decision(action)
41 |
42 | self.desired_x = desired_control[0]
43 | self.desired_y = 0.0
44 | self.desired_z = 0.0
45 | self.desired_ax = 0.0
46 | self.desired_ay = 0.0
47 | self.desired_az = desired_control[1]
48 |
49 | # define PID gain
50 | self.Kp = 1.2
51 | self.Kd = 0.05
52 |
53 | # error define
54 | error_x = self.desired_x - self.velocity_x
55 | error_y = self.desired_y - self.velocity_y
56 | error_z = self.desired_z - self.velocity_z
57 | error_ax = self.desired_ax - self.angular_x
58 | error_ay = self.desired_ay - self.angular_y
59 | error_az = self.desired_az - self.angular_z
60 |
61 | # PID control part
62 | u_x = self.Kp * error_x + self.Kd * (self.velocity_x - self._velocity_x) / self.delta_t
63 | u_y = self.Kp * error_y + self.Kd * (self.velocity_y - self._velocity_y) / self.delta_t
64 | u_z = self.Kp * error_z + self.Kd * (self.velocity_z - self._velocity_z) / self.delta_t
65 |
66 | a_x = self.Kp * error_ax + self.Kd * (self.angular_x - self._angular_x) / self.delta_t
67 | a_y = self.Kp * error_ay + self.Kd * (self.angular_y - self._angular_y) / self.delta_t
68 | a_z = self.Kp * error_az + self.Kd * (self.angular_z - self._angular_z) / self.delta_t
69 |
70 | # store the current measurements as the previous values for the next derivative term
71 | self._velocity_x = self.velocity_x
72 | self._velocity_y = self.velocity_y
73 | self._velocity_z = self.velocity_z
74 | self._angular_x = self.angular_x
75 | self._angular_y = self.angular_y
76 | self._angular_z = self.angular_z
77 |
78 | self.twist.linear.x = u_x
79 | self.twist.linear.y = u_y
80 | self.twist.linear.z = u_z
81 | self.twist.angular.x = a_x
82 | self.twist.angular.y = a_y
83 | self.twist.angular.z = a_z
84 | self.cmd_vel.publish(self.twist)
85 | return u_x, a_z
86 |
87 | def action_decision(self, action):
88 | if action < 3:
89 | self.speed[0] = self.action_table[action]
90 | self.speed[1] = 0.0
91 | else:
92 | self.speed[1] = self.action_table[action]
93 |
94 | return self.speed
95 |
96 |
97 | def velocity_callback(self, msg):
98 | self.velocity_x = msg.twist.twist.linear.x
99 | self.velocity_y = msg.twist.twist.linear.y
100 | self.velocity_z = msg.twist.twist.linear.z
101 | self.angular_x = msg.twist.twist.angular.x
102 | self.angular_y = msg.twist.twist.angular.y
103 | self.angular_z = msg.twist.twist.angular.z
104 |
105 |
--------------------------------------------------------------------------------
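Note on PID.velocity_control above: despite the class name it is a PD loop on body-frame velocity, u = Kp * e + Kd * (v - v_prev) / dt, with the derivative taken on the measurement. A worked step with the gains from the file and illustrative velocities, mirroring the file's sign convention:

    # one PD step with the gains in drone_control.py (Kp=1.2, Kd=0.05, dt=0.2)
    Kp, Kd, dt = 1.2, 0.05, 0.2
    desired_x, velocity_x, prev_velocity_x = 0.4, 0.25, 0.20

    error_x = desired_x - velocity_x                              # 0.15
    u_x = Kp * error_x + Kd * (velocity_x - prev_velocity_x) / dt
    print(u_x)                                                    # 0.18 + 0.0125 = 0.1925
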
/real_world_test/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .fcrn import ResNet50UpProj
2 |
--------------------------------------------------------------------------------
/real_world_test/models/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/real_world_test/models/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/real_world_test/models/__pycache__/fcrn.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/real_world_test/models/__pycache__/fcrn.cpython-35.pyc
--------------------------------------------------------------------------------
/real_world_test/models/__pycache__/network.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/real_world_test/models/__pycache__/network.cpython-35.pyc
--------------------------------------------------------------------------------
/real_world_test/models/fcrn.py:
--------------------------------------------------------------------------------
1 | from .network import Network
2 |
3 | class ResNet50UpProj(Network):
4 | def setup(self):
5 | (self.feed('data')
6 | .conv(7, 7, 64, 2, 2, relu=False, name='conv1')
7 | .batch_normalization(relu=True, name='bn_conv1')
8 | .max_pool(3, 3, 2, 2, name='pool1')
9 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
10 | .batch_normalization(name='bn2a_branch1'))
11 |
12 | (self.feed('pool1')
13 | .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
14 | .batch_normalization(relu=True, name='bn2a_branch2a')
15 | .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
16 | .batch_normalization(relu=True, name='bn2a_branch2b')
17 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
18 | .batch_normalization(name='bn2a_branch2c'))
19 |
20 | (self.feed('bn2a_branch1',
21 | 'bn2a_branch2c')
22 | .add(name='res2a')
23 | .relu(name='res2a_relu')
24 | .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
25 | .batch_normalization(relu=True, name='bn2b_branch2a')
26 | .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
27 | .batch_normalization(relu=True, name='bn2b_branch2b')
28 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
29 | .batch_normalization(name='bn2b_branch2c'))
30 |
31 | (self.feed('res2a_relu',
32 | 'bn2b_branch2c')
33 | .add(name='res2b')
34 | .relu(name='res2b_relu')
35 | .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
36 | .batch_normalization(relu=True, name='bn2c_branch2a')
37 | .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
38 | .batch_normalization(relu=True, name='bn2c_branch2b')
39 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
40 | .batch_normalization(name='bn2c_branch2c'))
41 |
42 | (self.feed('res2b_relu',
43 | 'bn2c_branch2c')
44 | .add(name='res2c')
45 | .relu(name='res2c_relu')
46 | .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1')
47 | .batch_normalization(name='bn3a_branch1'))
48 |
49 | (self.feed('res2c_relu')
50 | .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a')
51 | .batch_normalization(relu=True, name='bn3a_branch2a')
52 | .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
53 | .batch_normalization(relu=True, name='bn3a_branch2b')
54 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
55 | .batch_normalization(name='bn3a_branch2c'))
56 |
57 | (self.feed('bn3a_branch1',
58 | 'bn3a_branch2c')
59 | .add(name='res3a')
60 | .relu(name='res3a_relu')
61 | .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
62 | .batch_normalization(relu=True, name='bn3b_branch2a')
63 | .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
64 | .batch_normalization(relu=True, name='bn3b_branch2b')
65 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
66 | .batch_normalization(name='bn3b_branch2c'))
67 |
68 | (self.feed('res3a_relu',
69 | 'bn3b_branch2c')
70 | .add(name='res3b')
71 | .relu(name='res3b_relu')
72 | .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
73 | .batch_normalization(relu=True, name='bn3c_branch2a')
74 | .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
75 | .batch_normalization(relu=True, name='bn3c_branch2b')
76 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
77 | .batch_normalization(name='bn3c_branch2c'))
78 |
79 | (self.feed('res3b_relu',
80 | 'bn3c_branch2c')
81 | .add(name='res3c')
82 | .relu(name='res3c_relu')
83 | .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
84 | .batch_normalization(relu=True, name='bn3d_branch2a')
85 | .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
86 | .batch_normalization(relu=True, name='bn3d_branch2b')
87 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
88 | .batch_normalization(name='bn3d_branch2c'))
89 |
90 | (self.feed('res3c_relu',
91 | 'bn3d_branch2c')
92 | .add(name='res3d')
93 | .relu(name='res3d_relu')
94 | .conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1')
95 | .batch_normalization(name='bn4a_branch1'))
96 |
97 | (self.feed('res3d_relu')
98 | .conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a')
99 | .batch_normalization(relu=True, name='bn4a_branch2a')
100 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
101 | .batch_normalization(relu=True, name='bn4a_branch2b')
102 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
103 | .batch_normalization(name='bn4a_branch2c'))
104 |
105 | (self.feed('bn4a_branch1',
106 | 'bn4a_branch2c')
107 | .add(name='res4a')
108 | .relu(name='res4a_relu')
109 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
110 | .batch_normalization(relu=True, name='bn4b_branch2a')
111 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
112 | .batch_normalization(relu=True, name='bn4b_branch2b')
113 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
114 | .batch_normalization(name='bn4b_branch2c'))
115 |
116 | (self.feed('res4a_relu',
117 | 'bn4b_branch2c')
118 | .add(name='res4b')
119 | .relu(name='res4b_relu')
120 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
121 | .batch_normalization(relu=True, name='bn4c_branch2a')
122 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
123 | .batch_normalization(relu=True, name='bn4c_branch2b')
124 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
125 | .batch_normalization(name='bn4c_branch2c'))
126 |
127 | (self.feed('res4b_relu',
128 | 'bn4c_branch2c')
129 | .add(name='res4c')
130 | .relu(name='res4c_relu')
131 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
132 | .batch_normalization(relu=True, name='bn4d_branch2a')
133 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
134 | .batch_normalization(relu=True, name='bn4d_branch2b')
135 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
136 | .batch_normalization(name='bn4d_branch2c'))
137 |
138 | (self.feed('res4c_relu',
139 | 'bn4d_branch2c')
140 | .add(name='res4d')
141 | .relu(name='res4d_relu')
142 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
143 | .batch_normalization(relu=True, name='bn4e_branch2a')
144 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
145 | .batch_normalization(relu=True, name='bn4e_branch2b')
146 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
147 | .batch_normalization(name='bn4e_branch2c'))
148 |
149 | (self.feed('res4d_relu',
150 | 'bn4e_branch2c')
151 | .add(name='res4e')
152 | .relu(name='res4e_relu')
153 | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
154 | .batch_normalization(relu=True, name='bn4f_branch2a')
155 | .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
156 | .batch_normalization(relu=True, name='bn4f_branch2b')
157 | .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
158 | .batch_normalization(name='bn4f_branch2c'))
159 |
160 | (self.feed('res4e_relu',
161 | 'bn4f_branch2c')
162 | .add(name='res4f')
163 | .relu(name='res4f_relu')
164 | .conv(1, 1, 2048, 2, 2, biased=False, relu=False, name='res5a_branch1')
165 | .batch_normalization(name='bn5a_branch1'))
166 |
167 | (self.feed('res4f_relu')
168 | .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a')
169 | .batch_normalization(relu=True, name='bn5a_branch2a')
170 | .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')
171 | .batch_normalization(relu=True, name='bn5a_branch2b')
172 | .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
173 | .batch_normalization(name='bn5a_branch2c'))
174 |
175 | (self.feed('bn5a_branch1',
176 | 'bn5a_branch2c')
177 | .add(name='res5a')
178 | .relu(name='res5a_relu')
179 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
180 | .batch_normalization(relu=True, name='bn5b_branch2a')
181 | .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')
182 | .batch_normalization(relu=True, name='bn5b_branch2b')
183 | .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
184 | .batch_normalization(name='bn5b_branch2c'))
185 |
186 | (self.feed('res5a_relu',
187 | 'bn5b_branch2c')
188 | .add(name='res5b')
189 | .relu(name='res5b_relu')
190 | .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
191 | .batch_normalization(relu=True, name='bn5c_branch2a')
192 | .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')
193 | .batch_normalization(relu=True, name='bn5c_branch2b')
194 | .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
195 | .batch_normalization(name='bn5c_branch2c'))
196 |
197 | (self.feed('res5b_relu',
198 | 'bn5c_branch2c')
199 | .add(name='res5c')
200 | .relu(name='res5c_relu')
201 | .conv(1, 1, 1024, 1, 1, biased=True, relu=False, name='layer1')
202 | .batch_normalization(relu=False, name='layer1_BN')
203 | .up_project([3, 3, 1024, 512], id = '2x', stride = 1, BN=True)
204 | .up_project([3, 3, 512, 256], id = '4x', stride = 1, BN=True)
205 | .up_project([3, 3, 256, 128], id = '8x', stride = 1, BN=True)
206 | .up_project([3, 3, 128, 64], id = '16x', stride = 1, BN=True)
207 | .dropout(name = 'drop', keep_prob = 1.)
208 | .conv(3, 3, 1, 1, 1, name = 'ConvPred'))
209 |
--------------------------------------------------------------------------------
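ResNet50UpProj above is instantiated in D3QN_test.py from a TF1 placeholder; a minimal sketch (the positional arguments map to network.Network.__init__: inputs, batch, keep_prob, is_training):

    import tensorflow as tf
    import models

    input_node = tf.placeholder(tf.float32, shape=(None, 228, 304, 3))
    net = models.ResNet50UpProj({'data': input_node}, 1, 1, False)
    depth = net.get_output()   # output of the final 'ConvPred' layer: one-channel depth
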
/real_world_test/models/network.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 |
4 | # ----------------------------------------------------------------------------------
5 | # Commonly used layers and operations based on ethereon's implementation
6 | # https://github.com/ethereon/caffe-tensorflow
7 | # Slight modifications may apply. FCRN-specific operations have also been appended.
8 | # ----------------------------------------------------------------------------------
9 | # Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.
10 | # ----------------------------------------------------------------------------------
11 |
12 | DEFAULT_PADDING = 'SAME'
13 |
14 |
15 | def get_incoming_shape(incoming):
16 | """ Returns the incoming data shape """
17 | if isinstance(incoming, tf.Tensor):
18 | return incoming.get_shape().as_list()
19 | elif isinstance(incoming, (np.ndarray, list, tuple)):  # np.array is a function, not a type
20 | return np.shape(incoming)
21 | else:
22 | raise Exception("Invalid incoming layer.")
23 |
24 |
25 | def interleave(tensors, axis):
26 | old_shape = get_incoming_shape(tensors[0])[1:]
27 | new_shape = [-1] + old_shape
28 | new_shape[axis] *= len(tensors)
29 | return tf.reshape(tf.stack(tensors, axis + 1), new_shape)
30 |
31 | def layer(op):
32 | '''Decorator for composable network layers.'''
33 |
34 | def layer_decorated(self, *args, **kwargs):
35 | # Automatically set a name if not provided.
36 | name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
37 |
38 | # Figure out the layer inputs.
39 | if len(self.terminals) == 0:
40 | raise RuntimeError('No input variables found for layer %s.' % name)
41 | elif len(self.terminals) == 1:
42 | layer_input = self.terminals[0]
43 | else:
44 | layer_input = list(self.terminals)
45 | # Perform the operation and get the output.
46 | layer_output = op(self, layer_input, *args, **kwargs)
47 | # Add to layer LUT.
48 | self.layers[name] = layer_output
49 | # This output is now the input for the next layer.
50 | self.feed(layer_output)
51 | # Return self for chained calls.
52 | return self
53 |
54 | return layer_decorated
55 |
56 |
57 | class Network(object):
58 |
59 | def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):
60 | # The input nodes for this network
61 | self.inputs = inputs
62 | # The current list of terminal nodes
63 | self.terminals = []
64 | # Mapping from layer names to layers
65 | self.layers = dict(inputs)
66 | # If true, the resulting variables are set as trainable
67 | self.trainable = trainable
68 | self.batch_size = batch
69 | self.keep_prob = keep_prob
70 | self.is_training = is_training
71 | self.setup()
72 |
73 |
74 | def setup(self):
75 | '''Construct the network. '''
76 | raise NotImplementedError('Must be implemented by the subclass.')
77 |
78 | def load(self, data_path, session, ignore_missing=False):
79 | '''Load network weights.
80 | data_path: The path to the numpy-serialized network weights
81 | session: The current TensorFlow session
82 | ignore_missing: If true, serialized weights for missing layers are ignored.
83 | '''
84 |         data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item()  # allow_pickle is required on NumPy >= 1.16.3
85 | for op_name in data_dict:
86 | with tf.variable_scope(op_name, reuse=True):
87 | for param_name, data in iter(data_dict[op_name].items()):
88 | try:
89 | var = tf.get_variable(param_name)
90 | session.run(var.assign(data))
91 |
92 | except ValueError:
93 | if not ignore_missing:
94 | raise
95 |
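# The .npy weight file is a pickled dict of dicts keyed by layer (variable
# scope) name, then by parameter name, e.g.
#   {'layer1': {'weights': ndarray, 'biases': ndarray}, ...}
# hence the reuse=True scope re-entry and per-parameter assign above.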
96 | def feed(self, *args):
97 | '''Set the input(s) for the next operation by replacing the terminal nodes.
98 | The arguments can be either layer names or the actual layers.
99 | '''
100 | assert len(args) != 0
101 | self.terminals = []
102 | for fed_layer in args:
103 | if isinstance(fed_layer, str):
104 | try:
105 | fed_layer = self.layers[fed_layer]
106 | except KeyError:
107 | raise KeyError('Unknown layer name fed: %s' % fed_layer)
108 | self.terminals.append(fed_layer)
109 | return self
110 |
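# feed() accepts layer names or raw tensors; passing several arguments makes
# the next layer receive a list of inputs, which is how residual merges are
# written, e.g. self.feed('res5b_relu', 'bn5c_branch2c').add(name='res5c')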
111 | def get_output(self):
112 | '''Returns the current network output.'''
113 | return self.terminals[-1]
114 |
115 | def get_layer_output(self, name):
116 | return self.layers[name]
117 |
118 | def get_unique_name(self, prefix):
119 | '''Returns an index-suffixed unique name for the given prefix.
120 | This is used for auto-generating layer names based on the type-prefix.
121 | '''
122 | ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
123 | return '%s_%d' % (prefix, ident)
124 |
125 | def make_var(self, name, shape):
126 | '''Creates a new TensorFlow variable.'''
127 | return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)
128 |
129 | def validate_padding(self, padding):
130 | '''Verifies that the padding is one of the supported ones.'''
131 | assert padding in ('SAME', 'VALID')
132 |
133 | @layer
134 | def conv(self,
135 | input_data,
136 | k_h,
137 | k_w,
138 | c_o,
139 | s_h,
140 | s_w,
141 | name,
142 | relu=True,
143 | padding=DEFAULT_PADDING,
144 | group=1,
145 | biased=True):
146 |
147 | # Verify that the padding is acceptable
148 | self.validate_padding(padding)
149 | # Get the number of channels in the input
150 | c_i = input_data.get_shape()[-1]
151 |
152 | if (padding == 'SAME'):
153 | input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], "CONSTANT")
154 |
155 | # Verify that the grouping parameter is valid
156 | assert c_i % group == 0
157 | assert c_o % group == 0
158 | # Convolution for a given input and kernel
159 | convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')
160 |
161 | with tf.variable_scope(name) as scope:
162 | kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
163 |
164 | if group == 1:
165 | # This is the common-case. Convolve the input without any further complications.
166 | output = convolve(input_data, kernel)
167 | else:
168 | # Split the input into groups and then convolve each of them independently
169 |
170 |                 input_groups = tf.split(input_data, group, axis=3)
171 |                 kernel_groups = tf.split(kernel, group, axis=3)
172 |                 output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
173 |                 # Concatenate the groups
174 |                 output = tf.concat(output_groups, axis=3)
175 |
176 | # Add the biases
177 | if biased:
178 | biases = self.make_var('biases', [c_o])
179 | output = tf.nn.bias_add(output, biases)
180 | if relu:
181 | # ReLU non-linearity
182 | output = tf.nn.relu(output, name=scope.name)
183 |
184 | return output
185 |
186 | @layer
187 | def relu(self, input_data, name):
188 | return tf.nn.relu(input_data, name=name)
189 |
190 | @layer
191 | def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
192 | self.validate_padding(padding)
193 | return tf.nn.max_pool(input_data,
194 | ksize=[1, k_h, k_w, 1],
195 | strides=[1, s_h, s_w, 1],
196 | padding=padding,
197 | name=name)
198 |
199 | @layer
200 | def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
201 | self.validate_padding(padding)
202 | return tf.nn.avg_pool(input_data,
203 | ksize=[1, k_h, k_w, 1],
204 | strides=[1, s_h, s_w, 1],
205 | padding=padding,
206 | name=name)
207 |
208 | @layer
209 | def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
210 | return tf.nn.local_response_normalization(input_data,
211 | depth_radius=radius,
212 | alpha=alpha,
213 | beta=beta,
214 | bias=bias,
215 | name=name)
216 |
217 | @layer
218 | def concat(self, inputs, axis, name):
219 |         return tf.concat(values=inputs, axis=axis, name=name)  # TF >= 1.0 uses 'axis', not 'concat_dim'
220 |
221 | @layer
222 | def add(self, inputs, name):
223 | return tf.add_n(inputs, name=name)
224 |
225 | @layer
226 | def fc(self, input_data, num_out, name, relu=True):
227 | with tf.variable_scope(name) as scope:
228 | input_shape = input_data.get_shape()
229 | if input_shape.ndims == 4:
230 | # The input is spatial. Vectorize it first.
231 | dim = 1
232 | for d in input_shape[1:].as_list():
233 | dim *= d
234 | feed_in = tf.reshape(input_data, [-1, dim])
235 | else:
236 | feed_in, dim = (input_data, input_shape[-1].value)
237 | weights = self.make_var('weights', shape=[dim, num_out])
238 | biases = self.make_var('biases', [num_out])
239 | op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
240 | fc = op(feed_in, weights, biases, name=scope.name)
241 | return fc
242 |
243 | @layer
244 | def softmax(self, input_data, name):
245 |         input_shape = [v.value for v in input_data.get_shape()]  # map() is lazy in Python 3
246 |         if len(input_shape) > 2:
247 |             # For certain models (like NiN), the singleton spatial dimensions
248 |             # need to be explicitly squeezed, since they're not broadcast-able
249 |             # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
250 |             if input_shape[1] == 1 and input_shape[2] == 1:
251 |                 input_data = tf.squeeze(input_data, axis=[1, 2])
252 |             else:
253 |                 raise ValueError('Rank 2 tensor input expected for softmax!')
254 |         return tf.nn.softmax(input_data, name=name)
255 |
256 | @layer
257 | def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
258 |
259 | with tf.variable_scope(name) as scope:
260 | shape = [input_data.get_shape()[-1]]
261 | pop_mean = tf.get_variable("mean", shape, initializer = tf.constant_initializer(0.0), trainable=False)
262 | pop_var = tf.get_variable("variance", shape, initializer = tf.constant_initializer(1.0), trainable=False)
263 | epsilon = 1e-4
264 | decay = 0.999
265 | if scale_offset:
266 | scale = tf.get_variable("scale", shape, initializer = tf.constant_initializer(1.0))
267 | offset = tf.get_variable("offset", shape, initializer = tf.constant_initializer(0.0))
268 | else:
269 | scale, offset = (None, None)
270 | if self.is_training:
271 | batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])
272 |
273 | train_mean = tf.assign(pop_mean,
274 | pop_mean * decay + batch_mean * (1 - decay))
275 | train_var = tf.assign(pop_var,
276 | pop_var * decay + batch_var * (1 - decay))
277 | with tf.control_dependencies([train_mean, train_var]):
278 | output = tf.nn.batch_normalization(input_data,
279 | batch_mean, batch_var, offset, scale, epsilon, name = name)
280 | else:
281 | output = tf.nn.batch_normalization(input_data,
282 | pop_mean, pop_var, offset, scale, epsilon, name = name)
283 |
284 | if relu:
285 | output = tf.nn.relu(output)
286 |
287 | return output
288 |
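# Note: is_training is a plain Python bool fixed at graph-construction time,
# so the moving mean/variance updates (via the control dependencies above)
# are only wired in when the graph is built for training; at inference the
# stored population statistics are used instead.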
289 | @layer
290 | def dropout(self, input_data, keep_prob, name):
291 | return tf.nn.dropout(input_data, keep_prob, name=name)
292 |
293 |
294 | def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):
295 |
296 | # Model upconvolutions (unpooling + convolution) as interleaving feature
297 | # maps of four convolutions (A,B,C,D). Building block for up-projections.
298 |
299 |
300 | # Convolution A (3x3)
301 | # --------------------------------------------------
302 | layerName = "layer%s_ConvA" % (id)
303 | self.feed(input_data)
304 | self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)
305 | outputA = self.get_output()
306 |
307 | # Convolution B (2x3)
308 | # --------------------------------------------------
309 | layerName = "layer%s_ConvB" % (id)
310 | padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], "CONSTANT")
311 | self.feed(padded_input_B)
312 | self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
313 | outputB = self.get_output()
314 |
315 | # Convolution C (3x2)
316 | # --------------------------------------------------
317 | layerName = "layer%s_ConvC" % (id)
318 | padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], "CONSTANT")
319 | self.feed(padded_input_C)
320 | self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
321 | outputC = self.get_output()
322 |
323 | # Convolution D (2x2)
324 | # --------------------------------------------------
325 | layerName = "layer%s_ConvD" % (id)
326 | padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], "CONSTANT")
327 | self.feed(padded_input_D)
328 | self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
329 | outputD = self.get_output()
330 |
331 | # Interleaving elements of the four feature maps
332 | # --------------------------------------------------
333 |         left = interleave([outputA, outputB], axis=1)  # interleave along height (axis 1 in NHWC)
334 |         right = interleave([outputC, outputD], axis=1)  # interleave along height
335 |         Y = interleave([left, right], axis=2)  # interleave along width
336 |
337 | if BN:
338 | layerName = "layer%s_BN" % (id)
339 | self.feed(Y)
340 | self.batch_normalization(name = layerName, scale_offset = True, relu = False)
341 | Y = self.get_output()
342 |
343 | if ReLU:
344 |             Y = tf.nn.relu(Y, name = "layer%s_ReLU" % (id))  # use a fresh name; layerName still holds the BN/conv name
345 |
346 | return Y
347 |
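# Taken together, the four convolutions compute the responses a 5x5 kernel
# would produce on a 2x zero-interleaved (unpooled) input, one per output
# phase: A -> (even row, even col), B -> (odd, even), C -> (even, odd),
# D -> (odd, odd). Interleaving them doubles H and W without materialising
# the sparse unpooled tensor (the "fast up-convolution" of the FCRN paper).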
348 |
349 | def up_project(self, size, id, stride = 1, BN = True):
350 |
351 | # Create residual upsampling layer (UpProjection)
352 |
353 | input_data = self.get_output()
354 |
355 | # Branch 1
356 | id_br1 = "%s_br1" % (id)
357 |
358 | # Interleaving Convs of 1st branch
359 | out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)
360 |
361 | # Convolution following the upProjection on the 1st branch
362 | layerName = "layer%s_Conv" % (id)
363 | self.feed(out)
364 | self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)
365 |
366 | if BN:
367 | layerName = "layer%s_BN" % (id)
368 | self.batch_normalization(name = layerName, scale_offset=True, relu = False)
369 |
370 | # Output of 1st branch
371 | branch1_output = self.get_output()
372 |
373 |
374 | # Branch 2
375 | id_br2 = "%s_br2" % (id)
376 | # Interleaving convolutions and output of 2nd branch
377 | branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)
378 |
379 |
380 | # sum branches
381 | layerName = "layer%s_Sum" % (id)
382 | output = tf.add_n([branch1_output, branch2_output], name = layerName)
383 | # ReLU
384 | layerName = "layer%s_ReLU" % (id)
385 | output = tf.nn.relu(output, name=layerName)
386 |
387 | self.feed(output)
388 | return self
389 |
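# up_project() is a residual block at the doubled resolution: branch 1 is
# up-convolution -> conv -> optional BN, branch 2 is the up-convolved
# projection shortcut, and their sum passes through a ReLU before being
# re-fed as the new terminal for the next chained layer.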
--------------------------------------------------------------------------------
/real_world_test/readme.md:
--------------------------------------------------------------------------------
1 | ### Depth Estimation
2 | - TensorFlow version == 1.12.0
3 | - The depth estimation model is based on the ResNet-50 architecture.
4 | - The Python files that define the model architecture are located in **models**.
5 | - Because the trained depth estimation model is too large to ship with this repository, download it from [FCRN-DepthPrediction](https://github.com/iro-cp/FCRN-DepthPrediction).
6 |
7 | ### How to run this code?
8 | - Run `D3QN_test.py` (a minimal loading sketch follows below).
9 |
10 |
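For reference, a minimal sketch of loading the depth network before launching the agent. It assumes the upstream `NYU_FCRN.ckpt` checkpoint and a `ResNet50UpProj` class in `models/fcrn.py` (the class name used by the upstream FCRN repository); adjust names and paths to your checkout.

import numpy as np
import tensorflow as tf
from models.fcrn import ResNet50UpProj  # assumed class name, as in upstream FCRN

# FCRN expects 228x304 RGB input and predicts a 128x160 single-channel depth map.
height, width = 228, 304
input_node = tf.placeholder(tf.float32, shape=(None, height, width, 3))
net = ResNet50UpProj({'data': input_node}, batch=1, keep_prob=1.0, is_training=False)

with tf.Session() as sess:
    tf.train.Saver().restore(sess, 'NYU_FCRN.ckpt')  # hypothetical checkpoint path
    frame = np.zeros((1, height, width, 3), dtype=np.float32)  # stand-in camera frame
    depth = sess.run(net.get_output(), feed_dict={input_node: frame})
    print(depth.shape)  # -> (1, 128, 160, 1)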
--------------------------------------------------------------------------------
/save_model/D3QN_V3.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/save_model/D3QN_V3.h5
--------------------------------------------------------------------------------
/save_model/D3QN_V_3_single.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/save_model/D3QN_V_3_single.h5
--------------------------------------------------------------------------------
/save_model/est_model_V1 (1).h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/save_model/est_model_V1 (1).h5
--------------------------------------------------------------------------------
/save_model/est_model_V1.json:
--------------------------------------------------------------------------------
1 | {"config": {"layers": [{"class_name": "InputLayer", "inbound_nodes": [], "config": {"sparse": false, "batch_input_shape": [null, 6, 128, 160, 1], "dtype": "float32", "name": "input_1"}, "name": "input_1"}, {"class_name": "ConvLSTM2D", "inbound_nodes": [[["input_1", 0, 0, {}]]], "config": {"go_backwards": false, "unit_forget_bias": true, "unroll": false, "implementation": 0, "dilation_rate": [1, 1], "name": "conv_lst_m2d_1", "strides": [2, 2], "kernel_size": [3, 3], "data_format": "channels_last", "bias_regularizer": null, "recurrent_constraint": null, "bias_constraint": null, "kernel_constraint": null, "recurrent_dropout": 0.0, "padding": "same", "stateful": false, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "kernel_regularizer": null, "recurrent_regularizer": null, "activation": "tanh", "return_state": false, "recurrent_activation": "hard_sigmoid", "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "filters": 20, "use_bias": true, "dropout": 0.0, "return_sequences": true, "activity_regularizer": null, "trainable": true}, "name": "conv_lst_m2d_1"}, {"class_name": "BatchNormalization", "inbound_nodes": [[["conv_lst_m2d_1", 0, 0, {}]]], "config": {"name": "batch_normalization_1", "gamma_constraint": null, "scale": true, "beta_regularizer": null, "beta_initializer": {"class_name": "Zeros", "config": {}}, "momentum": 0.99, "axis": -1, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "center": true, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "gamma_initializer": {"class_name": "Ones", "config": {}}, "gamma_regularizer": null, "epsilon": 0.001, "trainable": true, "beta_constraint": null}, "name": "batch_normalization_1"}, {"class_name": "MaxPooling3D", "inbound_nodes": [[["batch_normalization_1", 0, 0, {}]]], "config": {"pool_size": [1, 2, 2], "name": "max_pooling3d_1", "strides": [1, 2, 2], "data_format": "channels_last", "padding": "same", "trainable": true}, "name": "max_pooling3d_1"}, {"class_name": "ConvLSTM2D", "inbound_nodes": [[["max_pooling3d_1", 0, 0, {}]]], "config": {"go_backwards": false, "unit_forget_bias": true, "unroll": false, "implementation": 0, "dilation_rate": [1, 1], "name": "conv_lst_m2d_2", "strides": [2, 2], "kernel_size": [3, 3], "data_format": "channels_last", "bias_regularizer": null, "recurrent_constraint": null, "bias_constraint": null, "kernel_constraint": null, "recurrent_dropout": 0.0, "padding": "same", "stateful": false, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "kernel_regularizer": null, "recurrent_regularizer": null, "activation": "relu", "return_state": false, "recurrent_activation": "hard_sigmoid", "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "filters": 10, "use_bias": true, "dropout": 0.0, "return_sequences": true, "activity_regularizer": null, "trainable": true}, "name": "conv_lst_m2d_2"}, {"class_name": "BatchNormalization", "inbound_nodes": [[["conv_lst_m2d_2", 0, 0, {}]]], "config": {"name": "batch_normalization_2", "gamma_constraint": null, "scale": true, "beta_regularizer": null, "beta_initializer": {"class_name": "Zeros", "config": {}}, "momentum": 0.99, "axis": -1, "moving_variance_initializer": 
{"class_name": "Ones", "config": {}}, "center": true, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "gamma_initializer": {"class_name": "Ones", "config": {}}, "gamma_regularizer": null, "epsilon": 0.001, "trainable": true, "beta_constraint": null}, "name": "batch_normalization_2"}, {"class_name": "MaxPooling3D", "inbound_nodes": [[["batch_normalization_2", 0, 0, {}]]], "config": {"pool_size": [1, 3, 3], "name": "max_pooling3d_2", "strides": [1, 3, 3], "data_format": "channels_last", "padding": "same", "trainable": true}, "name": "max_pooling3d_2"}, {"class_name": "TimeDistributed", "inbound_nodes": [[["max_pooling3d_2", 0, 0, {}]]], "config": {"trainable": true, "layer": {"class_name": "Flatten", "config": {"trainable": true, "name": "flatten_1"}}, "name": "time_distributed_1"}, "name": "time_distributed_1"}, {"class_name": "TimeDistributed", "inbound_nodes": [[["time_distributed_1", 0, 0, {}]]], "config": {"trainable": true, "layer": {"class_name": "Dense", "config": {"units": 512, "activation": "linear", "name": "dense_1", "kernel_regularizer": null, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_regularizer": null, "bias_constraint": null, "kernel_constraint": null, "use_bias": true, "activity_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "trainable": true}}, "name": "time_distributed_2"}, "name": "time_distributed_2"}, {"class_name": "TimeDistributed", "inbound_nodes": [[["time_distributed_2", 0, 0, {}]]], "config": {"trainable": true, "layer": {"class_name": "Dense", "config": {"units": 1, "activation": "sigmoid", "name": "dense_2", "kernel_regularizer": null, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_regularizer": null, "bias_constraint": null, "kernel_constraint": null, "use_bias": true, "activity_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "trainable": true}}, "name": "time_distributed_3"}, "name": "time_distributed_3"}], "output_layers": [["time_distributed_3", 0, 0]], "input_layers": [["input_1", 0, 0]], "name": "model_1"}, "keras_version": "2.1.2", "class_name": "Model", "backend": "tensorflow"}
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_49_03.550191.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_49_03.550191.jpg
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_49_41.711470.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_49_41.711470.jpg
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_50_14.533198.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_50_14.533198.jpg
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_50_52.218574.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_50_52.218574.jpg
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_51_25.348340.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_51_25.348340.jpg
--------------------------------------------------------------------------------
/training env/default_gzclient_camera(1)-2020-01-20T23_51_43.545469.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mw9385/Collision-avoidance/c5ba190952eed2f5d58d04f5db8791c607140e8e/training env/default_gzclient_camera(1)-2020-01-20T23_51_43.545469.jpg
--------------------------------------------------------------------------------