├── CMakeLists.txt
├── LICENSE
├── README.md
├── Repeat
│   ├── backward.py
│   ├── forward.py
│   └── relocalization.py
├── launch
│   └── create_map.launch
├── package.xml
├── semantic_mapping
│   ├── map.py
│   └── orb_obj.py
└── src
    └── show_map.cpp
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0.2)
2 | project(semantic_visual_teach_repeat)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 | geometry_msgs
12 | roscpp
13 | rospy
14 | std_msgs
15 | sensor_msgs
16 | visualization_msgs
17 | message_filters
18 | )
19 | find_package(OpenCV REQUIRED)  # OpenCV is not a catkin component, so find it directly
20 | find_package(Eigen3 3.1.0 REQUIRED)
21 | ## System dependencies are found with CMake's conventions
22 | # find_package(Boost REQUIRED COMPONENTS system)
23 |
24 |
25 | ## Uncomment this if the package has a setup.py. This macro ensures
26 | ## modules and global scripts declared therein get installed
27 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
28 | # catkin_python_setup()
29 |
30 | ################################################
31 | ## Declare ROS messages, services and actions ##
32 | ################################################
33 |
34 | ## To declare and build messages, services or actions from within this
35 | ## package, follow these steps:
36 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
37 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
38 | ## * In the file package.xml:
39 | ## * add a build_depend tag for "message_generation"
40 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
41 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
42 | ## but can be declared for certainty nonetheless:
43 | ## * add a exec_depend tag for "message_runtime"
44 | ## * In this file (CMakeLists.txt):
45 | ## * add "message_generation" and every package in MSG_DEP_SET to
46 | ## find_package(catkin REQUIRED COMPONENTS ...)
47 | ## * add "message_runtime" and every package in MSG_DEP_SET to
48 | ## catkin_package(CATKIN_DEPENDS ...)
49 | ## * uncomment the add_*_files sections below as needed
50 | ## and list every .msg/.srv/.action file to be processed
51 | ## * uncomment the generate_messages entry below
52 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
53 |
54 | ## Generate messages in the 'msg' folder
55 | # add_message_files(
56 | # FILES
57 | # Message1.msg
58 | # Message2.msg
59 | # )
60 |
61 | ## Generate services in the 'srv' folder
62 | # add_service_files(
63 | # FILES
64 | # Service1.srv
65 | # Service2.srv
66 | # )
67 |
68 | ## Generate actions in the 'action' folder
69 | # add_action_files(
70 | # FILES
71 | # Action1.action
72 | # Action2.action
73 | # )
74 |
75 | ## Generate added messages and services with any dependencies listed here
76 | # generate_messages(
77 | # DEPENDENCIES
78 | # std_msgs
79 | # )
80 |
81 | ################################################
82 | ## Declare ROS dynamic reconfigure parameters ##
83 | ################################################
84 |
85 | ## To declare and build dynamic reconfigure parameters within this
86 | ## package, follow these steps:
87 | ## * In the file package.xml:
88 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
89 | ## * In this file (CMakeLists.txt):
90 | ## * add "dynamic_reconfigure" to
91 | ## find_package(catkin REQUIRED COMPONENTS ...)
92 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
93 | ## and list every .cfg file to be processed
94 |
95 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
96 | # generate_dynamic_reconfigure_options(
97 | # cfg/DynReconf1.cfg
98 | # cfg/DynReconf2.cfg
99 | # )
100 |
101 | ###################################
102 | ## catkin specific configuration ##
103 | ###################################
104 | ## The catkin_package macro generates cmake config files for your package
105 | ## Declare things to be passed to dependent projects
106 | ## INCLUDE_DIRS: uncomment this if your package contains header files
107 | ## LIBRARIES: libraries you create in this project that dependent projects also need
108 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
109 | ## DEPENDS: system dependencies of this project that dependent projects also need
110 | catkin_package(
111 | # INCLUDE_DIRS include
112 | # LIBRARIES semantic_visual_teach_repeat
113 | # CATKIN_DEPENDS roscpp rospy std_msgs
114 | # DEPENDS system_lib
115 | )
116 |
117 | ###########
118 | ## Build ##
119 | ###########
120 |
121 | ## Specify additional locations of header files
122 | ## Your package locations should be listed before other locations
123 | include_directories(
124 | # include
125 | ${catkin_INCLUDE_DIRS}
126 | ${OpenCV_INCLUDE_DIRS}
127 | ${EIGEN3_INCLUDE_DIR}
128 | ${message_filters_INCLUDE_DIRS}
129 | )
130 |
131 | ## Declare a C++ library
132 | # add_library(${PROJECT_NAME}
133 | # src/${PROJECT_NAME}/semantic_visual_teach_repeat.cpp
134 | # )
135 |
136 | ## Add cmake target dependencies of the library
137 | ## as an example, code may need to be generated before libraries
138 | ## either from message generation or dynamic reconfigure
139 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
140 |
141 | ## Declare a C++ executable
142 | ## With catkin_make all packages are built within a single CMake context
143 | ## The recommended prefix ensures that target names across packages don't collide
144 | add_executable(visualizer src/show_map.cpp)
145 |
146 | ## Rename C++ executable without prefix
147 | ## The above recommended prefix causes long target names, the following renames the
148 | ## target back to the shorter version for ease of user use
149 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
150 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
151 |
152 | ## Add cmake target dependencies of the executable
153 | ## same as for the library above
154 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
155 |
156 | ## Specify libraries to link a library or executable target against
157 | target_link_libraries(visualizer
158 | ${catkin_LIBRARIES}
159 | ${OpenCV_LIBS}
160 | ${EIGEN3_LIBS}
161 | ${message_filters_LIBRARIES}
162 | )
163 |
164 | #############
165 | ## Install ##
166 | #############
167 |
168 | # all install targets should use catkin DESTINATION variables
169 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
170 |
171 | ## Mark executable scripts (Python etc.) for installation
172 | ## in contrast to setup.py, you can choose the destination
173 | catkin_install_python(PROGRAMS
174 | semantic_mapping/orb_obj.py
175 | semantic_mapping/map.py
176 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
177 | )
178 |
179 | ## Mark executables for installation
180 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
181 | # install(TARGETS ${PROJECT_NAME}_node
182 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
183 | # )
184 |
185 | ## Mark libraries for installation
186 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
187 | # install(TARGETS ${PROJECT_NAME}
188 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
189 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
190 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
191 | # )
192 |
193 | ## Mark cpp header files for installation
194 | # install(DIRECTORY include/${PROJECT_NAME}/
195 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
196 | # FILES_MATCHING PATTERN "*.h"
197 | # PATTERN ".svn" EXCLUDE
198 | # )
199 |
200 | ## Mark other files for installation (e.g. launch and bag files, etc.)
201 | # install(FILES
202 | # # myfile1
203 | # # myfile2
204 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
205 | # )
206 |
207 | #############
208 | ## Testing ##
209 | #############
210 |
211 | ## Add gtest based cpp test target and link libraries
212 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_semantic_visual_teach_repeat.cpp)
213 | # if(TARGET ${PROJECT_NAME}-test)
214 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
215 | # endif()
216 |
217 | ## Add folders to be run by python nosetests
218 | # catkin_add_nosetests(test)
219 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2022, mohammad mahdavian
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # semantic_visual_teach_repeat
2 | This is the repo for the paper [Robust Visual Teach and Repeat for UGVs Using 3D Semantic Maps](https://arxiv.org/pdf/2109.10445.pdf). It contains a ROS package implementing a semantic-mapping-based algorithm for visual teach and repeat.
3 |
4 | ## Installation
5 | To use this package you need ORB-SLAM3 installed with some modifications, so first:
6 |
7 | 1. Clone and install https://github.com/mmahdavian/ORB_SLAM3
8 |
9 | We used a ZED2 camera; you can use this settings file: https://drive.google.com/file/d/1pnH8I0jPJyFRbS0umJ-sP9qohCG2cBOJ/view?usp=sharing
10 |
11 | 2. You need a YOLOv3 model to detect objects in the environment. We have provided a trained model file you can use. It has been trained on the 24 most common objects in the environment: person, bench, backpack, umbrella, handbag, suitcase, bottle, cup, bowl, banana, apple, chair, couch, bed, tv, laptop, mouse, remote, keyboard, microwave, oven, toaster, sink, clock
12 | https://drive.google.com/file/d/1S49rcLiUPrg4bG95tJpmsuYaCCiGi_Of/view?usp=sharing
13 | Download the file; you will move it into the darknet_ros package later. If you want to use your own YOLO model, you need to modify the "semantic_mapping/map.py" code. In its gotdata function there are three lists named big_objs, medium_objs and small_objs that specify the general size of each object class detected by YOLO; for example, a couch is a big object and a banana is a small one. Adjust these lists to match your model and class numbers (see the snippet after this list).
14 | 3. You need the darknet_ros package to publish the objects detected by the YOLO model to ROS. Clone the following into your catkin_ws/src:
15 | https://github.com/mmahdavian/darknet_ros
16 |
17 | Then move the downloaded model file into the darknet_ros/darknet_ros/yolo_network_config/weights folder.
18 |
19 | 4. In your catkin_ws/src, open a terminal and run: git clone https://github.com/mmahdavian/semantic_visual_teach_repeat.git
20 | 5. cd ../
21 | 6. catkin_make
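
If you retrain YOLO with different classes, the size lists in map.py's gotdata are what you edit. For reference, here is a minimal sketch of the lists as they ship in this repo; the merge thresholds quoted in the comment come from the same function:

```python
# From semantic_mapping/map.py (gotdata): YOLO class IDs grouped by rough object size.
# The group picks the distance threshold used to merge repeated detections of the
# same object into one map entry: big -> 1.5 m, medium -> 0.75 m, small -> 0.5 m.
big_objs    = [12, 13, 14, 19, 20, 22]
medium_objs = [0, 1, 2, 3, 4, 5, 11, 15, 18, 21, 23]
small_objs  = [6, 7, 8, 9, 10, 16, 17]
```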
22 |
23 | ## Testing
24 | ### Teach Phase
25 | 1. roslaunch darknet_ros yolo_v3.launch
26 | 2. Then run ORB-SLAM3 using:
27 |
28 | rosrun ORB_SLAM3 Mono Vocabulary/ORBvoc.txt "your camera yaml file"
29 |
30 | 3. roslaunch semantic_visual_teach_repeat create_map.launch
31 | Then move the camera along what will become the teach path; the Visual Teach and Repeat (VTR) algorithm generates a semantic map of the environment as the teach map.
32 |
33 | Robot Path:
34 |
35 | (robot path figure: image lost to extraction)
36 |
37 | Generated Semantic Map:
38 |
39 | (semantic map figure: image lost to extraction)
40 |
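
To sanity-check the teach output before repeating, you can plot the recorded trajectory. A minimal sketch, assuming the default save location used by semantic_mapping/orb_obj.py (projection.csv rows are x, y, z, qx, qy, qz, qw, and the scripts treat x-z as the ground plane):

```python
# Plot the teach path on the ground plane.
import csv
import matplotlib.pyplot as plt

xs, zs = [], []
with open('/home/mohammad/Desktop/projection.csv') as f:  # path used by orb_obj.py
    for row in csv.reader(f, quoting=csv.QUOTE_NONNUMERIC):
        xs.append(row[0])
        zs.append(row[2])

plt.plot(xs, zs)
plt.xlabel('x [m]')
plt.ylabel('z [m]')
plt.title('Teach path (camera trajectory)')
plt.show()
```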
41 | ### Repeat Phase
42 | 4. In order to repeat the same motion, first rename the generated map and path files:
43 | 'projection.csv' to 'C1.csv' and 'obj_map.csv' to 'M1.csv' (a rename sketch follows this list)
44 | 5. At repeat time, run steps 1 to 3 again. Then, after some motion, once at least two objects are in your semantic map:
45 |
46 | cd semantic_visual_teach_repeat/Repeat
47 |
48 | python3 relocalization.py
49 |
50 | The robot will then be relocalized to its new location.
51 |
52 | 'python3 forward.py' or 'python3 backward.py' moves the TurtleBot 2 robot toward the teach path and repeats it in the forward/backward direction.
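
A minimal sketch of step 4's renaming (the Repeat scripts read C1.csv and M1.csv from a hard-coded Desktop directory, as in Repeat/relocalization.py; adjust base to wherever your files were written):

```python
# Rename the teach outputs so the Repeat scripts can find them.
import os

base = '/home/mohammad/Desktop'  # directory hard-coded in the Repeat scripts
os.rename(os.path.join(base, 'projection.csv'), os.path.join(base, 'C1.csv'))
os.rename(os.path.join(base, 'obj_map.csv'), os.path.join(base, 'M1.csv'))
```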
53 |
54 | ## Video
55 | You can find a video of our visual teach and repeat algorithm's performance here:
56 | https://www.youtube.com/watch?v=raRT7S9NSfc&t
57 |
58 | ## License and Reference
59 | This package is released under the BSD 3-Clause License.
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/Repeat/backward.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import numpy as np
3 | import csv
4 | import rospy
5 | from sensor_msgs.msg import PointCloud,ChannelFloat32
6 | from geometry_msgs.msg import PoseStamped,Point32,PoseArray,Pose
7 | from scipy.spatial.transform import Rotation as R
8 | import time
9 | import os
10 | import math
11 | import matplotlib.pyplot as plt
12 | from numpy import pi,cos,sin
13 | from geometry_msgs.msg import Twist
14 | from std_srvs.srv import Trigger,TriggerResponse
15 | import copy
16 |
17 | class controller():
18 | def __init__(self,teach_map,camera1):
19 |
20 | self.init_mode = True
21 | self.path1 = camera1
22 | self.cp_idx = 0
23 | self.path_angles = []
24 | self.current_euler = 0
25 | self.tot_path = None
26 | self.repeat_done = False
27 | self.start_position = None
28 | self.T_changed = False
29 |
30 | rospy.init_node('listener', anonymous=True)
31 | self.pose_sub = rospy.Subscriber('/new_orb_pose', PoseStamped, self.get_pose)
32 | self.velocity_publisher = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=10)
33 | self.trig = rospy.Service('/new_loc',Trigger,self.trigger)
34 | self.goal_points_publisher = rospy.Publisher('/goal_points_pose', PointCloud , queue_size=1)
35 | self.nodrift_cam_pub = rospy.Publisher('/new_nodrift_cam_pub', PoseStamped , queue_size=1)
36 |
37 | while not rospy.is_shutdown():
38 | if self.init_mode == True:
39 | while self.start_position == None:
40 | time.sleep(1)
41 | self.move_init(self.start_position)
42 | self.follow_teach(camera1)  # may return None when re-triggered, so don't unpack
43 | if self.repeat_done == True:
44 | return
45 |
46 | def move_init(self,start_position):
47 | #### go to closest point on teach path
48 | new_position = [start_position[0],start_position[1]]
49 | closest_point,idx = self.closest_to_teach(new_position)
50 | self.closest_point = closest_point
51 | self.cp_idx = idx
52 | start = [new_position[0], new_position[1]]
53 | goal = [closest_point[0], closest_point[2]]
54 |
55 | init_path = []
56 | init_path.append(start)
57 | init_path.append(goal)
58 |
59 | self.init_mode = False
60 | self.tot_path = init_path
61 | self.follow_teach(self.path1)  # self.path1 == camera1; avoids relying on the module-level global
62 |
63 |
64 | def closest_to_teach(self,cur_pose):
65 | min_error = 100
66 | idx = 0
67 | for i,point in enumerate(self.path1):
68 | error = np.linalg.norm([cur_pose[0]-point[0],cur_pose[1]-point[2]])
69 | if error<min_error:
… | (lines 70-139 lost to extraction)
140 | if dis_yaw > 0:
141 | sign = -1
142 | else:
143 | sign = 1
144 | if dis_yaw > 180:
145 | sign = 1
146 | if dis_yaw < -180:
147 | sign = -1
148 |
149 | dis_yaw_thresh = 3
150 |
151 | self.xp_cons = copy.deepcopy(self.xp)
152 | self.zp_cons = copy.deepcopy(self.zp)
153 |
154 | init_angle = self.current_euler
155 | while(abs(dis_yaw) > dis_yaw_thresh):
156 | if self.T_changed == True:
157 | self.init_mode = True   # assignment (was '==', a no-op comparison)
158 | self.T_changed = False
159 | return
160 |
161 | print("prev_p: "+str(self.tot_path[i])+"\n"+"next:"+str(self.tot_path[i+1])+"\n"+"cur:"+str([self.xp,self.zp]))
162 | dx = self.tot_path[i+1][0] - self.xp
163 | dz = self.tot_path[i+1][1] - self.zp
164 |
165 | goal_yaw = np.arctan2(dx,dz)
166 | if goal_yaw<0:
167 | goal_yaw += 2*np.pi
168 | goal_yaw = goal_yaw *180/np.pi
169 | dis_yaw = goal_yaw - self.current_euler
170 | print("yaw goal is: " +str(goal_yaw)+" current yaw is: "+str(self.current_euler)+" dis_yaw is: "+str(dis_yaw))
171 | if dis_yaw > 0:
172 | sign = -1
173 | else:
174 | sign = 1
175 | if dis_yaw > 180:
176 | sign = 1
177 | if dis_yaw < -180:
178 | sign = -1
179 |
180 | vel_msg.linear.x = 0
181 | vel_msg.linear.y = 0
182 | vel_msg.linear.z = 0
183 | vel_msg.angular.x = 0
184 | vel_msg.angular.y = 0
185 |
186 | vel_msg.angular.z = sign * 0.45 # check the sign later
187 | self.velocity_publisher.publish(vel_msg)
188 |
189 | if abs(init_angle-self.current_euler)>25:
190 | print("waiting for ORB-SLAM")
191 | time.sleep(4)
192 | init_angle = self.current_euler
193 |
194 |
195 | nodrift_cam = PoseStamped()
196 | nodrift_cam.header.stamp = rospy.Time.now()
197 | nodrift_cam.header.frame_id = "map"
198 | nodrift_cam.pose.position.x = self.xp
199 | nodrift_cam.pose.position.y = self.yp
200 | nodrift_cam.pose.position.z = self.zp
201 |
202 | nodrift_cam.pose.orientation.x = self.xa
203 | nodrift_cam.pose.orientation.y = self.ya
204 | nodrift_cam.pose.orientation.z = self.za
205 | nodrift_cam.pose.orientation.w = self.wa
206 |
207 | self.nodrift_cam_pub.publish(nodrift_cam)
208 |
209 |
210 | drift_x = copy.deepcopy(self.xp) - self.xp_cons
211 | drift_z = copy.deepcopy(self.zp) - self.zp_cons
212 |
213 | print("reached the angle")
214 | time.sleep(2)
215 |
216 | dis_p = np.linalg.norm([self.tot_path[i+1][0] - self.xp,self.tot_path[i+1][1] - self.zp])
217 |
218 | dis_p_thresh = 0.05
219 | while(dis_p > dis_p_thresh):
220 | if self.T_changed == True:
221 | self.init_mode = True   # assignment (was '==', a no-op comparison)
222 | self.T_changed = False
223 | return
224 |
225 | dx = self.tot_path[i+1][0] - (self.xp )
226 | dz = self.tot_path[i+1][1] - (self.zp )
227 | goal_yaw = np.arctan2(dx,dz)
228 | if goal_yaw<0:
229 | goal_yaw += 2*np.pi
230 |
231 | goal_yaw = goal_yaw * 180/np.pi
232 |
233 | dis_yaw = goal_yaw - self.current_euler
234 | print("goal: " +str(self.tot_path[i+1])+" current: "+str([self.xp,self.zp])+" dis: "+str(dis_p)+" dis_yaw:"+str(dis_yaw))
235 |
236 | dis_p = np.linalg.norm([self.tot_path[i+1][0] - (self.xp),self.tot_path[i+1][1] - (self.zp)])
237 |
238 | vel_msg.linear.x = 0.1
239 | vel_msg.linear.y = 0
240 | vel_msg.linear.z = 0
241 | vel_msg.angular.x = 0
242 | vel_msg.angular.y = 0
243 | if abs(dis_yaw)>dis_yaw_thresh:
244 |
245 | if dis_yaw > 0:
246 | sign = -1
247 | else:
248 | sign = 1
249 | if dis_yaw > 180:
250 | sign = 1
251 | if dis_yaw < -180:
252 | sign = -1
253 |
254 | print("dis yaw is: "+str(dis_yaw))
255 | vel_msg.angular.z = sign*0.2
256 | print("angle cmd: " +str(sign*0.2))
257 |
258 | else:
259 | vel_msg.angular.z = 0
260 |
261 | self.velocity_publisher.publish(vel_msg)
262 |
263 | nodrift_cam = PoseStamped()
264 | nodrift_cam.header.stamp = rospy.Time.now()
265 | nodrift_cam.header.frame_id = "map"
266 |
267 | nodrift_cam.pose.position.x = self.xp
268 | nodrift_cam.pose.position.y = self.yp
269 | nodrift_cam.pose.position.z = self.zp
270 |
271 |
272 |
273 | nodrift_cam.pose.orientation.x = self.xa
274 | nodrift_cam.pose.orientation.y = self.ya
275 | nodrift_cam.pose.orientation.z = self.za
276 | nodrift_cam.pose.orientation.w = self.wa
277 |
278 | self.nodrift_cam_pub.publish(nodrift_cam)
279 |
280 |
281 | print("reached a point")
282 | time.sleep(1)
283 | #After the loop, stops the robot
284 | vel_msg.linear.x = 0
285 | #Force the robot to stop
286 | self.velocity_publisher.publish(vel_msg)
287 | self.repeat_done = True
288 |
289 |
290 | return self.tot_path,self.path_angles
291 |
292 | def get_pose(self,pose):
293 | self.xp = pose.pose.position.x
294 | self.yp = pose.pose.position.y
295 | self.zp = pose.pose.position.z
296 | self.xa = pose.pose.orientation.x
297 | self.ya = pose.pose.orientation.y
298 | self.za = pose.pose.orientation.z
299 | self.wa = pose.pose.orientation.w
300 | r2 = R.from_quat([self.xa,self.ya,self.za,self.wa])
301 | yaw_angle = r2.as_euler('XZY')
302 | self.current_euler = yaw_angle[2] *180/np.pi
303 | if self.current_euler < 0:
304 | self.current_euler += 360
305 | self.start_position = [self.xp,self.zp]
306 | start_euler = [self.current_euler]
307 |
308 | def trigger(self,req):
309 | self.T_changed = True
310 | print("trigger called")
311 | return TriggerResponse(True,"hi")
312 |
313 | if __name__ == '__main__':
314 |
315 | camera1 = []
316 | teach_map = []
317 |
318 | with open('/home/mohammad/Desktop/C1.csv', mode = 'r') as cam1:
319 | teach_cam = csv.reader(cam1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
320 | for row in teach_cam:
321 | camera1.append(row)
322 |
323 |
324 | with open('/home/mohammad/Desktop/M1.csv', mode = 'r') as map1:
325 | teach_csv = csv.reader(map1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
326 | for row in teach_csv:
327 | teach_map.append(row)
328 |
329 | print(teach_map)
330 | controller(teach_map, camera1)
331 | rospy.spin()
332 |
333 |
--------------------------------------------------------------------------------
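
A note on the controller above (the same loops reappear in forward.py below): headings are kept in [0, 360) degrees, and the sign of the angular velocity command is chosen so the robot takes the shorter turn, with the +/-180 degree checks handling wrap-around. A minimal standalone sketch of that decision, extracted here for illustration (it is not a separate module in the repo):

```python
import numpy as np

def turn_sign(goal_yaw_deg, current_yaw_deg):
    """Turn direction as used in follow_teach; headings in [0, 360) degrees."""
    dis_yaw = goal_yaw_deg - current_yaw_deg
    sign = -1 if dis_yaw > 0 else 1  # base case (the scripts' inverted cmd_vel convention)
    if dis_yaw > 180:                # wrap-around: turning the other way is shorter
        sign = 1
    if dis_yaw < -180:
        sign = -1
    return sign

print(turn_sign(10.0, 350.0), turn_sign(350.0, 10.0))  # -1 1 (both wrap-around cases)
```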
/Repeat/forward.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import numpy as np
3 | import csv
4 | import rospy
5 | from sensor_msgs.msg import PointCloud,ChannelFloat32
6 | from geometry_msgs.msg import PoseStamped,Point32,PoseArray,Pose
7 | from scipy.spatial.transform import Rotation as R
8 | import time
9 | import os
10 | import math
11 | import matplotlib.pyplot as plt
12 | from numpy import pi,cos,sin
13 | from geometry_msgs.msg import Twist
14 | from std_srvs.srv import Trigger,TriggerResponse
15 | import copy
16 |
17 | class controller():
18 | def __init__(self,teach_map,camera1):
19 |
20 | self.init_mode = True
21 | self.path1 = camera1
22 | self.cp_idx = 0
23 | self.path_angles = []
24 | self.current_euler = 0
25 | self.tot_path = None
26 | self.repeat_done = False
27 | self.start_position = None
28 | self.T_changed = False
29 |
30 | rospy.init_node('listener', anonymous=True)
31 | self.pose_sub = rospy.Subscriber('/new_orb_pose', PoseStamped, self.get_pose)
32 | self.velocity_publisher = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=10)
33 | self.trig = rospy.Service('/new_loc',Trigger,self.trigger)
34 | self.goal_points_publisher = rospy.Publisher('/goal_points_pose', PointCloud , queue_size=1)
35 | self.nodrift_cam_pub = rospy.Publisher('/new_nodrift_cam_pub', PoseStamped , queue_size=1)
36 |
37 | while not rospy.is_shutdown():
38 | if self.init_mode == True:
39 | while self.start_position == None:
40 | time.sleep(1)
41 | self.move_init(self.start_position)
42 | self.follow_teach(camera1)  # may return None when re-triggered, so don't unpack
43 | if self.repeat_done == True:
44 | return
45 |
46 | def move_init(self,start_position):
47 | #### go to closest point on teach path
48 | new_position = [start_position[0],start_position[1]]
49 | closest_point,idx = self.closest_to_teach(new_position)
50 | self.closest_point = closest_point
51 | self.cp_idx = idx
52 | start = [new_position[0], new_position[1]]
53 | goal = [closest_point[0], closest_point[2]]
54 |
55 | init_path = []
56 | init_path.append(start)
57 | init_path.append(goal)
58 |
59 | self.init_mode = False
60 | self.tot_path = init_path
61 | self.follow_teach(self.path1)  # self.path1 == camera1; avoids relying on the module-level global
62 |
63 |
64 | def closest_to_teach(self,cur_pose):
65 | min_error = 100
66 | idx = 0
67 | for i,point in enumerate(self.path1):
68 | error = np.linalg.norm([cur_pose[0]-point[0],cur_pose[1]-point[2]])
69 | if error<min_error:
… | (lines 70-144 lost to extraction)
145 | if … > 10:
146 | omega = W[-1]
147 | else:
148 | omega = (Teta[-1] - Teta[-2])/dt
149 | W.append(omega)
150 | v = np.sqrt(x_dot**2 + y_dot**2)
151 | tot_v.append(v)
152 |
153 | V_X[-1] = vx2
154 | V_Y[-1] = vy2
155 | tot_v[-1] = v2
156 | Teta[-1] = yaw2*180/pi
157 |
158 | return tot_v,W,Teta,X_pos,Y_pos
159 |
160 | def follow_teach(self,camera1):
161 | vel_msg = Twist()
162 | X = []
163 | Y = []
164 | while(self.tot_path == None):
165 | time.sleep(1)
166 |
167 | for i in range(self.cp_idx,len(self.path1)):
168 | point = self.path1[i]
169 | if (self.cp_idx - i)%100 == 0:
170 | path_point = [point[0],point[2]]
171 | self.tot_path.append(path_point)
172 | self.tot_path.append([self.path1[-1][0],self.path1[-1][2]])
173 | for i in range(len(self.tot_path)-1):
174 | dx = self.tot_path[i+1][0] - self.tot_path[i][0]
175 | dy = self.tot_path[i+1][1] - self.tot_path[i][1]
176 | X.append(self.tot_path[i][0])
177 | Y.append(self.tot_path[i][1])
178 | print(X)
179 | print(Y)
180 |
181 | angle = np.arctan2(dy,dx)
182 | if angle<0:
183 | angle += 2*np.pi
184 | self.path_angles.append(angle*180/np.pi)
185 |
186 | #### publish goal points
187 | goal_points_cloud = PointCloud();
188 | goal_points = []
189 | for j in range(len(self.tot_path)):
190 | gp = self.tot_path[j]
191 | pt = Point32()
192 | pt.x = gp[0]
193 | pt.y = 0
194 | pt.z = gp[1]
195 | goal_points.append(pt)
196 |
197 | goal_points_cloud.header.frame_id = "map"
198 | goal_points_cloud.header.stamp = rospy.Time.now()
199 | goal_points_cloud.points = goal_points
200 | goal_points_cloud.channels = []
201 | self.goal_points_publisher.publish(goal_points_cloud)
202 |
203 | drift_x = 0
204 | drift_z = 0
205 |
206 | print("planning")
207 | for i in range(len(self.tot_path)-1):  # -1: the loop body reads tot_path[i+1]
208 | if self.T_changed == True:
209 | self.init_mode = True   # assignment (was '==', a no-op comparison)
210 | self.T_changed = False
211 | return
212 |
213 | dx = self.tot_path[i+1][0] - (self.xp)
214 | dz = self.tot_path[i+1][1] - (self.zp)
215 | goal_yaw = np.arctan2(dx,dz)
216 |
217 | if goal_yaw<0:
218 | goal_yaw += 2*np.pi
219 | goal_yaw = goal_yaw *180/np.pi
220 | dis_yaw = goal_yaw - self.current_euler
221 | if dis_yaw > 0:
222 | sign = -1
223 | else:
224 | sign = 1
225 | if dis_yaw > 180:
226 | sign = 1
227 | if dis_yaw < -180:
228 | sign = -1
229 |
230 | dis_yaw_thresh = 3
231 |
232 | self.xp_cons = copy.deepcopy(self.xp)
233 | self.zp_cons = copy.deepcopy(self.zp)
234 |
235 | init_angle = self.current_euler
236 | while(abs(dis_yaw) > dis_yaw_thresh):
237 | if self.T_changed == True:
238 | self.init_mode = True   # assignment (was '==', a no-op comparison)
239 | self.T_changed = False
240 | return
241 |
242 | print("prev_p: "+str(self.tot_path[i])+"\n"+"next:"+str(self.tot_path[i+1])+"\n"+"cur:"+str([self.xp,self.zp]))
243 | dx = self.tot_path[i+1][0] - self.xp
244 | dz = self.tot_path[i+1][1] - self.zp
245 |
246 | goal_yaw = np.arctan2(dx,dz)
247 | if goal_yaw<0:
248 | goal_yaw += 2*np.pi
249 | goal_yaw = goal_yaw *180/np.pi
250 | dis_yaw = goal_yaw - self.current_euler
251 | print("yaw goal is: " +str(goal_yaw)+" current yaw is: "+str(self.current_euler)+" dis_yaw is: "+str(dis_yaw))
252 | if dis_yaw > 0:
253 | sign = -1
254 | else:
255 | sign = 1
256 | if dis_yaw > 180:
257 | sign = 1
258 | if dis_yaw < -180:
259 | sign = -1
260 |
261 | vel_msg.linear.x = 0
262 | vel_msg.linear.y = 0
263 | vel_msg.linear.z = 0
264 | vel_msg.angular.x = 0
265 | vel_msg.angular.y = 0
266 |
267 | vel_msg.angular.z = sign * 0.45 # check the sign later
268 | self.velocity_publisher.publish(vel_msg)
269 |
270 | if abs(init_angle-self.current_euler)>25:
271 | print("waiting for ORB-SLAM")
272 | time.sleep(4)
273 | init_angle = self.current_euler
274 |
275 |
276 | nodrift_cam = PoseStamped()
277 | nodrift_cam.header.stamp = rospy.Time.now()
278 | nodrift_cam.header.frame_id = "map"
279 | nodrift_cam.pose.position.x = self.xp
280 | nodrift_cam.pose.position.y = self.yp
281 | nodrift_cam.pose.position.z = self.zp
282 |
283 | nodrift_cam.pose.orientation.x = self.xa
284 | nodrift_cam.pose.orientation.y = self.ya
285 | nodrift_cam.pose.orientation.z = self.za
286 | nodrift_cam.pose.orientation.w = self.wa
287 |
288 | self.nodrift_cam_pub.publish(nodrift_cam)
289 |
290 |
291 | drift_x = copy.deepcopy(self.xp) - self.xp_cons
292 | drift_z = copy.deepcopy(self.zp) - self.zp_cons
293 |
294 | print("reached the angle")
295 | time.sleep(2)
296 |
297 | dis_p = np.linalg.norm([self.tot_path[i+1][0] - self.xp,self.tot_path[i+1][1] - self.zp])
298 |
299 | dis_p_thresh = 0.05
300 | while(dis_p > dis_p_thresh):
301 | if self.T_changed == True:
302 | self.init_mode = True   # assignment (was '==', a no-op comparison)
303 | self.T_changed = False
304 | return
305 |
306 | dx = self.tot_path[i+1][0] - (self.xp )
307 | dz = self.tot_path[i+1][1] - (self.zp )
308 | goal_yaw = np.arctan2(dx,dz)
309 | if goal_yaw<0:
310 | goal_yaw += 2*np.pi
311 |
312 | goal_yaw = goal_yaw * 180/np.pi
313 | dis_yaw = goal_yaw - self.current_euler
314 | print("goal: " +str(self.tot_path[i+1])+" current: "+str([self.xp,self.zp])+" dis: "+str(dis_p)+" dis_yaw:"+str(dis_yaw))
315 |
316 | dis_p = np.linalg.norm([self.tot_path[i+1][0] - (self.xp),self.tot_path[i+1][1] - (self.zp)])
317 |
318 | vel_msg.linear.x = 0.1
319 | vel_msg.linear.y = 0
320 | vel_msg.linear.z = 0
321 | vel_msg.angular.x = 0
322 | vel_msg.angular.y = 0
323 | if abs(dis_yaw)>dis_yaw_thresh:
324 |
325 | if dis_yaw > 0:
326 | sign = -1
327 | else:
328 | sign = 1
329 | if dis_yaw > 180:
330 | sign = 1
331 | if dis_yaw < -180:
332 | sign = -1
333 |
334 | print("dis yaw is: "+str(dis_yaw))
335 | vel_msg.angular.z = sign*0.2
336 | print("angle cmd: " +str(sign*0.2))
337 |
338 | else:
339 | vel_msg.angular.z = 0
340 |
341 | self.velocity_publisher.publish(vel_msg)
342 |
343 | nodrift_cam = PoseStamped()
344 | nodrift_cam.header.stamp = rospy.Time.now()
345 | nodrift_cam.header.frame_id = "map"
346 | nodrift_cam.pose.position.x = self.xp
347 | nodrift_cam.pose.position.y = self.yp
348 | nodrift_cam.pose.position.z = self.zp
349 |
350 |
351 |
352 | nodrift_cam.pose.orientation.x = self.xa
353 | nodrift_cam.pose.orientation.y = self.ya
354 | nodrift_cam.pose.orientation.z = self.za
355 | nodrift_cam.pose.orientation.w = self.wa
356 |
357 | self.nodrift_cam_pub.publish(nodrift_cam)
358 |
359 | print("reached a point")
360 | time.sleep(1)
361 | #After the loop, stops the robot
362 | vel_msg.linear.x = 0
363 | #Force the robot to stop
364 | self.velocity_publisher.publish(vel_msg)
365 | self.repeat_done = True
366 |
367 | return self.tot_path,self.path_angles
368 |
369 | def get_pose(self,pose):
370 | self.xp = pose.pose.position.x
371 | self.yp = pose.pose.position.y
372 | self.zp = pose.pose.position.z
373 | self.xa = pose.pose.orientation.x
374 | self.ya = pose.pose.orientation.y
375 | self.za = pose.pose.orientation.z
376 | self.wa = pose.pose.orientation.w
377 | r2 = R.from_quat([self.xa,self.ya,self.za,self.wa])
378 | yaw_angle = r2.as_euler('XZY')
379 | self.current_euler = yaw_angle[2] *180/np.pi
380 | if self.current_euler < 0:
381 | self.current_euler += 360
382 | self.start_position = [self.xp,self.zp]
383 | start_euler = [self.current_euler]
384 |
385 | def trigger(self,req):
386 | self.T_changed = True
387 | print("trigger called")
388 | return TriggerResponse(True,"hi")
389 |
390 | if __name__ == '__main__':
391 |
392 | # plt.figure()
393 |
394 | camera1 = []
395 | teach_map = []
396 |
397 | with open('/home/mohammad/Desktop/C1.csv', mode = 'r') as cam1:
398 | teach_cam = csv.reader(cam1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
399 | for row in teach_cam:
400 | camera1.append(row)
401 |
402 |
403 | with open('/home/mohammad/Desktop/M1.csv', mode = 'r') as map1:
404 | teach_csv = csv.reader(map1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
405 | for row in teach_csv:
406 | teach_map.append(row)
407 |
408 | print(teach_map)
409 | controller(teach_map, camera1)
410 | rospy.spin()
411 |
412 |
--------------------------------------------------------------------------------
/Repeat/relocalization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import numpy as np
3 | from numpy import cos,sin,pi
4 | import matplotlib.pyplot as plt
5 | import csv
6 | import rospy
7 | from sensor_msgs.msg import PointCloud,ChannelFloat32
8 | from geometry_msgs.msg import PoseStamped,Point32,PoseArray,Pose
9 | from scipy.spatial.transform import Rotation as R
10 | import time
11 | import os
12 | from std_srvs.srv import Trigger,TriggerRequest,TriggerResponse
13 | import copy
14 |
15 | class repeating():
16 | def __init__(self,map1,camera1):
17 | self.teach_cams = camera1
18 | self.map1 = None
19 | self.camera1 = None
20 | self.all_objs = []
21 | self.K = np.array([[528.96002, 0, 620.22998],
22 | [0, 528.66498, 368.64499],
23 | [0, 0, 1]])
24 | self.map_rotation = np.array([[1 , 0 , 0 ],
25 | [0 ,cos(0*pi/180),-sin(0*pi/180)],
26 | [0 ,sin(0*pi/180),cos(0*pi/180)]])
27 | self.preprocessing(map1,camera1)
28 | self.L1 = self.map1
29 | self.L2 = None
30 | self.pos1 = self.camera1
31 | self.scale = 1
32 | self.T12 = np.array([[1,0,0,0],
33 | [0,1,0,0],
34 | [0,0,1,0],
35 | [0,0,0,1]])
36 | self.prev_T12 = np.array([[1,0,0,0],
37 | [0,1,0,0],
38 | [0,0,1,0],
39 | [0,0,0,1]])
40 | self.init_mode = True
41 | self.prev_scale = 1
42 | self.prev_rot = np.array([[1,0,0],
43 | [0,1,0],
44 | [0,0,1]])
45 |
46 | rospy.init_node('listener', anonymous=True)
47 | self.cam_sub = rospy.Subscriber('/orb_pose', PoseStamped, self.get_pose)
48 | self.map_sub = rospy.Subscriber('/map_objects', PointCloud , self.get_maps)
49 | self.new_obj_pub = rospy.Publisher('/new_map_objects', PointCloud , queue_size=1)
50 | self.new_cam_pub = rospy.Publisher('/new_orb_pose', PoseStamped , queue_size=1)
51 | self.teach_obj_pub = rospy.Publisher('/teach_map_objects', PointCloud , queue_size=1 )
52 | self.teach_cam_pub = rospy.Publisher('/teach_orb_pose', PoseArray , queue_size=1)
53 | self.trig = rospy.ServiceProxy('/new_loc', Trigger)
54 |
55 | def trigger(self):
56 | res = TriggerResponse()
57 | res.success = True
58 | return res
59 |
60 | def publish_teach(self):
61 |
62 | #### publish new objs
63 | teach_cloud = PointCloud();
64 | teach_classes = ChannelFloat32()
65 | geo_points = []
66 | obj_classes = []
67 | for j in range(len(self.L1)):
68 | obj = self.L1[j]
69 | pt = Point32()
70 | pt.x = obj[0]
71 | pt.y = obj[1]
72 | pt.z = obj[2]
73 | geo_points.append(pt)
74 | obj_classes.append(obj[3])
75 |
76 | teach_classes.values = np.array(obj_classes)
77 | teach_classes.name = "teach objects in map"
78 | teach_cloud.header.frame_id = "map"
79 | teach_cloud.header.stamp = rospy.Time.now()
80 | teach_cloud.points = geo_points
81 | teach_cloud.channels = [teach_classes]
82 | self.teach_obj_pub.publish(teach_cloud)
83 |
84 |
85 | #### publish teach camera frames
86 | self.short_cam1 = []
87 | for i,line in enumerate(self.teach_cams):
88 | if i%10 == 0:
89 | self.short_cam1.append(line)
90 |
91 | teach_cam_arr = PoseArray()
92 | teach_cam_arr.header.stamp = rospy.Time.now()
93 | teach_cam_arr.header.frame_id = "map"
94 |
95 | tot_poses = []
96 | for i in range(len(self.short_cam1)):
97 |
98 | cur_pose = Pose()
99 | line = self.short_cam1[i]
100 | cur_pose.position.x = line[0]
101 | cur_pose.position.y = line[1]
102 | cur_pose.position.z = line[2]
103 |
104 | cur_pose.orientation.x = line[3]
105 | cur_pose.orientation.y = line[4]
106 | cur_pose.orientation.z = line[5]
107 | cur_pose.orientation.w = line[6]
108 | tot_poses.append(cur_pose)
109 | teach_cam_arr.poses = tot_poses
110 | self.teach_cam_pub.publish(teach_cam_arr)
111 |
112 |
113 | def preprocessing(self,map1,camera1):
114 | for i in range(len(map1)):
115 | obj = map1[i]
116 | obj_pos = np.array([float(obj[0]),float(obj[1]),float(obj[2])]).T
117 | obj_rotated_pos = np.dot(self.map_rotation,obj_pos)
118 | map1[i][0] = obj_rotated_pos[0]
119 | map1[i][1] = obj_rotated_pos[1]
120 | map1[i][2] = obj_rotated_pos[2]
121 | self.map1 = map1
122 |
123 | pos1 = []
124 | for i, line in enumerate(camera1):
125 | x1 = float(line[0])
126 | y1 = float(line[1])
127 | z1 = float(line[2])
128 | pos = np.array([x1,y1,z1]).T
129 | rotated_pos = np.dot(self.map_rotation,pos)
130 | pos1.append(rotated_pos)
131 |
132 |
133 | pos1 = np.array(pos1)
134 | self.camera1 = pos1
135 |
136 |
137 | def max_dis(self):
138 | dmax1 = 0
139 | for m1 in self.L1:
140 | for m2 in self.L1:
141 | d = np.linalg.norm([m1[0]-m2[0],m1[2]-m2[2]])
142 | if d>dmax1:
143 | dmax1 = d
144 |
145 | dmax2 = 0
146 | for m1 in self.L2:
147 | for m2 in self.L2:
148 | d = np.linalg.norm([m1[0]-m2[0],m1[2]-m2[2]])
149 | if d>dmax2:
150 | dmax2 = d
151 |
152 | max_dis_L1 = dmax1
153 | max_dis_L2 = dmax2
154 | return [max_dis_L1,max_dis_L2]
155 |
156 |
157 | def get_maps(self,obj_map):
158 | all_objs = []
159 | map_points = obj_map.points
160 | if len(map_points)<2:
161 | print("too soon")
162 | return 0
163 | for i in range(len(map_points)):
164 | classes = obj_map.channels[0].values
165 | cur_obj = classes[i]
166 | pt = Point32()
167 | pt = map_points[i]
168 | mpt = np.array([pt.x,pt.y,pt.z]).T
169 | new_mpt = np.dot(self.map_rotation,mpt)
170 | all_objs.append([new_mpt[0],new_mpt[1],new_mpt[2],cur_obj])
171 | self.all_objs = all_objs
172 | self.L2 = all_objs
173 | if self.T12[0][0] == 1:
174 | self.reloc()
175 |
176 |
177 | def get_pose(self,cam):
178 | self.publish_teach()
179 |
180 | c=np.zeros(3)
181 | R=np.zeros(4)
182 | cam_position = np.array([cam.pose.position.x,cam.pose.position.y,cam.pose.position.z]).T
183 | new_cam_position = np.dot(self.map_rotation,cam_position)
184 |
185 | c[0] = new_cam_position[0]
186 | c[1] = new_cam_position[1]
187 | c[2] = new_cam_position[2]
188 |
189 | R[0] = cam.pose.orientation.x
190 | R[1] = cam.pose.orientation.y
191 | R[2] = cam.pose.orientation.z
192 | R[3] = cam.pose.orientation.w
193 |
194 | K = self.K
195 | Rot = np.array([[1-2*(R[1]**2)-2*(R[2]**2),2*R[0]*R[1]+2*R[2]*R[3],2*R[0]*R[2]-2*R[1]*R[3],0],
196 | [2*R[0]*R[1]-2*R[2]*R[3],1-2*(R[0]**2)-2*(R[2]**2),2*R[1]*R[2]+2*R[0]*R[3],0],
197 | [2*R[0]*R[2]+2*R[1]*R[3],2*R[1]*R[2]-2*R[0]*R[3],1-2*(R[0]**2)-2*(R[1]**2),0],
198 | [0,0,0,1]])
199 |
200 |
201 | Proj = np.zeros((3,4))
202 | Proj[0:3,0:3] = Rot[0:3,0:3]
203 | tcw_ros = np.array([[-c[0]],[-c[1]],[-c[2]]])
204 | tcw_orb = np.dot(Rot[0:3,0:3],tcw_ros)
205 |
206 | Proj[0,3]=tcw_orb[0]
207 | Proj[1,3]=tcw_orb[1]
208 | Proj[2,3]=tcw_orb[2]
209 | cur_cam = cam.pose.position
210 | cur_cam_position = c
211 |
212 | if c[0] !=0 and c[1] !=0:
213 | with open('/home/mohammad/Desktop/cur_cam.csv', mode='a') as proj_file:
214 | proj_writer = csv.writer(proj_file, delimiter=',')
215 | proj_writer.writerow([c[0],c[1],c[2],R[0],R[1],R[2],R[3]])
216 |
217 |
218 | if c[0] !=0 and c[1] !=0 and self.L2 != None:
219 | self.transfer_motions(cur_cam_position,Proj)
220 | print(self.T12)
221 | print(self.scale)
222 |
223 | def theta(self,o1,o2,L,delta):
224 |
225 | p1 = np.array([[o1[0].tolist()],[o1[2].tolist()],[0],[1]])
226 | p2 = np.array([[o2[0].tolist()],[o2[2].tolist()],[0],[1]])
227 | ang = np.arctan2(p2[1]-p1[1],p2[0]-p1[0]) + delta * np.pi/180.
228 |
229 | dp = np.array([p2[0]-p1[0],p2[1]-p1[1],1])
230 | Rot = np.array([[cos(-(ang-pi/2)),-sin(-(ang-pi/2)),0],
231 | [sin(-(ang-pi/2)), cos(-(ang-pi/2)),0],
232 | [ 0 , 0 ,1]])
233 | new_p1 = np.array([0,0]).T
234 | new_p2 = np.dot(Rot,dp).T
235 | new_p2 = new_p2[0:2]
236 | New_Map=[]
237 | New_Map.append([new_p1[0],new_p1[1],o1[3]])
238 | New_Map.append([new_p2[0],new_p2[1],o2[3]])
239 | for k in range(len(L)):
240 | if L[k,0] == o1[0] and L[k,2] == o1[2]:
241 | continue
242 | if L[k,0] == o2[0] and L[k,2] == o2[2]:
243 | continue
244 | dP2 = np.array([[L[k,0]-p1[0]],[L[k,2]-p1[1]],[1]])
245 | new_P = np.dot(Rot,dP2).T
246 | New_Map.append([new_P[0,0],new_P[0,1],L[k,3].tolist()])
247 |
248 | return New_Map
249 |
250 | def get_translation(self,obj1,obj2,scale,d_teta):
251 |
252 | for i in range(len(self.L1)):
253 | map_obj = self.L1[i]
254 | if map_obj[3] == obj1:
255 | obj1_L1_loc = self.L1[i][:]
256 | if map_obj[3] == obj2:
257 | obj2_L1_loc = self.L1[i][:]
258 | for i in range(len(self.L2)):
259 | map_obj = self.L2[i]
260 | if map_obj[3] == obj1:
261 | obj1_L2_loc = self.L2[i][:]
262 | if map_obj[3] == obj2:
263 | obj2_L2_loc = self.L2[i][:]
264 |
265 | ## finding To1:
266 | p1 = np.array([obj1_L1_loc[0],obj1_L1_loc[2],0,1]).T
267 | p2 = np.array([obj2_L1_loc[0],obj2_L1_loc[2],0,1]).T
268 | ang = np.arctan2(p2[1]-p1[1],p2[0]-p1[0])
269 |
270 |
271 | To1 = np.array([[cos((ang-pi/2)),-sin((ang-pi/2)), 0, p1[0]],
272 | [ sin((ang-pi/2)), cos((ang-pi/2)), 0, p1[1]],
273 | [ 0 , 0 ,1, 0 ],
274 | [ 0 , 0 ,0, 1 ]])
275 |
276 | ## finding To2:
277 | p1 = np.array([obj1_L2_loc[0],obj1_L2_loc[2],0,1]).T
278 | p2 = np.array([obj2_L2_loc[0],obj2_L2_loc[2],0,1]).T
279 | ang = np.arctan2(p2[1]-p1[1],p2[0]-p1[0] )
280 | To2 = np.array([[cos((ang-pi/2)),-sin((ang-pi/2)) ,0, p1[0]],
281 | [ sin((ang-pi/2)), cos((ang-pi/2)) ,0, p1[1]],
282 | [ 0 , 0 ,1, 0 ],
283 | [ 0 , 0 ,0, 1 ]])
284 |
285 | To2[0,3] = To2[0,3] * scale
286 | To2[1,3] = To2[1,3] * scale
287 | To2[2,3] = To2[2,3] * scale
288 |
289 | inv_To2 = np.linalg.inv(To2)
290 | T12 = np.dot(To1,inv_To2)
291 |
292 | return T12
293 |
294 | def transfer_motions(self,cur_cam,Proj):
295 | #### publish repeat camera frames
296 | cam_rot = Proj[0:3,0:3]
297 | x2 = float(cur_cam[0]) * self.scale
298 | y2 = float(cur_cam[1]) * self.scale
299 | z2 = float(cur_cam[2]) * self.scale
300 |
301 | new_T12 = np.array([[self.T12[0,0],0,-self.T12[0,1]],
302 | [ 0, 1 , 0 ],
303 | [-self.T12[1,0],0,self.T12[1,1]]])
304 |
305 | pos = np.array([x2,z2,0,1]).T
306 | new_position = np.dot(self.T12,pos)
307 | new_rot = np.dot(new_T12,cam_rot)
308 | print(cam_rot)
309 | print(new_rot)
310 | print()
311 | r = R.from_matrix(new_rot)
312 | new_quat = r.as_quat()
313 | new_euler = r.as_euler('XZY') * 180/np.pi
314 | print("new_euler is:\n"+str(new_euler))
315 |
316 | r2 = R.from_matrix(cam_rot)
317 | old_quat = r2.as_quat()
318 | old_euler = r2.as_euler('XZY')* 180/np.pi
319 |
320 | print("old_euler is:\n"+str(old_euler))
321 | print()
322 |
323 |
324 | if new_position[0] !=0 and new_position[1] != 0:
325 | with open('/home/mohammad/Desktop/new_cam.csv', mode='a') as proj_file:
326 | proj_writer = csv.writer(proj_file, delimiter=',')
327 | proj_writer.writerow([new_position[0],y2,new_position[1],new_quat[0],new_quat[1],new_quat[2],new_quat[3]])
328 |
329 | new_cam = PoseStamped()
330 | new_cam.header.stamp = rospy.Time.now()
331 | new_cam.header.frame_id = "map"
332 | new_cam.pose.position.x = new_position[0]
333 | new_cam.pose.position.y = y2
334 | new_cam.pose.position.z = new_position[1]
335 |
336 | new_cam.pose.orientation.x = new_quat[0]
337 | new_cam.pose.orientation.y = new_quat[1]
338 | new_cam.pose.orientation.z = new_quat[2]
339 | new_cam.pose.orientation.w = -new_quat[3] # was -
340 |
341 | self.new_cam_pub.publish(new_cam)
342 |
343 | #### publish new objs
344 | cloud = PointCloud();
345 | classes = ChannelFloat32()
346 | geo_points = []
347 | obj_classes = []
348 | for j in range(len(self.L2)):
349 | obj = self.L2[j]
350 | obj_pos = np.array([self.scale*obj[0], self.scale*obj[2],0,1]).T
351 | res = np.dot(self.T12 , obj_pos)
352 | pt = Point32()
353 | pt.x = res[0]
354 | pt.y = self.scale*obj[1]
355 | pt.z = res[1]
356 | geo_points.append(pt)
357 | obj_classes.append(obj[3])
358 |
359 | classes.values = np.array(obj_classes)
360 | classes.name = "modified objects in map"
361 | cloud.header.frame_id = "map"
362 | cloud.header.stamp = rospy.Time.now()
363 | cloud.points = geo_points
364 | cloud.channels = [classes]
365 | self.new_obj_pub.publish(cloud)
366 |
367 |
368 | def hungarian(self,L1_main,L2_main):
369 | res = []
370 | [max_dis_L1,max_dis_L2]=self.max_dis()
371 | dteta = np.array([0])
372 | for delta in dteta:
373 | L1 = np.array(L1_main)
374 | L2 = np.array(L2_main)
375 |
376 | for i in range(len(L2)-1,-1,-1):
377 | cur_obj = L2[i]
378 | found = 0
379 | for j in range(len(L1)-1,-1,-1):
380 | prev_obj = L1[j]
381 | if prev_obj[3] == cur_obj[3]:
382 | found = 1
383 | if found == 0:
384 | L2 = np.delete(L2,i,0)
385 |
386 | for i in range(len(L1)-1,-1,-1):
387 | cur_obj = L1[i]
388 | found = 0
389 | for j in range(len(L2)-1,-1,-1):
390 | prev_obj = L2[j]
391 | if prev_obj[3] == cur_obj[3]:
392 | found = 1
393 | if found == 0:
394 | L1 = np.delete(L1,i,0)
395 |
396 | L1_maps = []
397 | for i in range(len(L1)):
398 | for j in range(len(L1)):
399 | if i == j:
400 | continue
401 |
402 | dis = np.linalg.norm([L1[i,0]-L1[j,0],L1[i,2]-L1[j,2]])
403 | if max_dis_L1/dis>5:
404 | continue
405 |
406 | New_Map = self.theta(L1[i,:],L1[j,:],L1,0)
407 | L1_maps.append(New_Map)
408 |
409 | L2_maps = []
410 | for i in range(len(L2)):
411 | for j in range(len(L2)):
412 | if i == j:
413 | continue
414 |
415 | dis = np.linalg.norm([L2[i,0]-L2[j,0],L2[i,2]-L2[j,2]])
416 | if max_dis_L2/dis>5:
417 | continue
418 |
419 | New_Map = self.theta(L2[i,:],L2[j,:],L2,delta)
420 | L2_maps.append(New_Map)
421 |
422 |
423 | for i in range(len(L2_maps)):
424 | cur_frame = L2_maps[i]
425 | cur_orig = cur_frame[0]
426 | cur_aim = cur_frame[1]
427 | cur_others = np.array(cur_frame[2:])
428 | for j in range(len(L1_maps)):
429 | prev_frame = L1_maps[j]
430 | prev_orig = prev_frame[0]
431 | prev_aim = prev_frame[1]
432 | prev_others = np.array(prev_frame[2:])
433 |
434 | if cur_orig[2] != prev_orig[2] or cur_aim[2] != prev_aim[2]:
435 | continue
436 |
437 | # scaling
438 | map_ratio = prev_aim[1]/cur_aim[1]
439 | s_range = map_ratio
440 | for S in s_range:
441 | er_list = []
442 | error = np.linalg.norm(np.array([prev_aim[0] - S*cur_aim[0] , prev_aim[1] - S*cur_aim[1]]))
443 | for k in range(len(prev_others)):
444 | for m in range(len(cur_others)):
445 | if prev_others[k,2] != cur_others[m,2]:
446 | continue
447 | dis = np.array([prev_others[k,0] - S*cur_others[m,0] , prev_others[k,1] - S*cur_others[m,1]])
448 | er = np.linalg.norm(dis)
449 | er_list.append(er)
450 | if len(er_list) != 0:
451 | sorted_er = sorted(er_list)
452 | if len(L2)/2. % 1 == 0:
453 | er_objs = len(L2)/2. -1
454 | else:
455 | er_objs = np.floor(len(L2)/2.)
456 |
457 | chosen_er = sorted_er[0:int(er_objs)]
458 | if len(chosen_er)>1:
459 | chosen_er = sum(chosen_er)
460 | error = error + chosen_er
461 |
462 | res.append([i,S,delta,error])
463 |
464 | res = np.array(res)
465 | sort = res[res[:,3].argsort()]
466 | scale = sort[0,1]
467 | d_teta = sort[0,2]
468 | best_map_id = int(sort[0,0])
469 | best_map = L2_maps[best_map_id]
470 | obj1 = best_map[0][2]
471 | obj2 = best_map[1][2]
472 | T12 = self.get_translation(obj1,obj2,scale,d_teta)
473 |
474 | return [T12,scale]
475 |
476 | def reloc(self):
477 |
478 | [self.T12,self.scale] = self.hungarian(self.L1,self.L2)
479 | print(self.T12)
480 | print(self.scale)
481 | if self.scale != self.prev_scale:
482 | print("new scale")
483 |
484 | print(self.scale)
485 | print(self.prev_scale)
486 | print()
487 | self.prev_scale = copy.deepcopy(self.scale)
488 |
489 |
490 |
491 |
492 | if __name__ == '__main__':
493 |
494 | cur_cam_file = '/home/mohammad/Desktop/cur_cam.csv'
495 | if os.path.exists(cur_cam_file):
496 | os.remove(cur_cam_file)
497 |
498 | new_cam_file = '/home/mohammad/Desktop/new_cam.csv'
499 | if os.path.exists(new_cam_file):
500 | os.remove(new_cam_file)
501 |
502 | new_cam_file = '/home/mohammad/Desktop/init_path.csv'
503 | if os.path.exists(new_cam_file):
504 | os.remove(new_cam_file)
505 |
506 |
507 | camera1 = []
508 | teach_map = []
509 |
510 | with open('/home/mohammad/Desktop/C1.csv', mode = 'r') as cam1:
511 | teach_cam = csv.reader(cam1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
512 | for row in teach_cam:
513 | camera1.append(row)
514 |
515 |
516 | with open('/home/mohammad/Desktop/M1.csv', mode = 'r') as map1:
517 | teach_csv = csv.reader(map1, delimiter=',',quoting=csv.QUOTE_NONNUMERIC)
518 | for row in teach_csv:
519 | teach_map.append(row)
520 | print(teach_map)
521 | repeating(teach_map, camera1)
522 | rospy.spin()
523 |
524 |
525 |
--------------------------------------------------------------------------------
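
For illustration, a distilled sketch of the core of get_translation above: anchor an SE(2) frame on the segment between two matched objects in each map, rescale the repeat-map frame, and compose to get T12 (repeat to teach). Positions are (x, z) ground-plane coordinates; the object positions below are hypothetical:

```python
import numpy as np
from numpy import cos, sin, pi

def frame_from_pair(p1, p2):
    """4x4 frame whose y-axis points from object p1 toward object p2."""
    ang = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
    a = ang - pi / 2
    return np.array([[cos(a), -sin(a), 0, p1[0]],
                     [sin(a),  cos(a), 0, p1[1]],
                     [0,       0,      1, 0    ],
                     [0,       0,      0, 1    ]])

def align(teach_p1, teach_p2, repeat_p1, repeat_p2, scale):
    To1 = frame_from_pair(teach_p1, teach_p2)   # teach-map frame
    To2 = frame_from_pair(repeat_p1, repeat_p2) # repeat-map frame
    To2[0:3, 3] *= scale                        # rescale the repeat map first
    return To1 @ np.linalg.inv(To2)             # T12: repeat -> teach

# hypothetical matched object positions (x, z)
T12 = align([0, 0], [2, 0], [1, 1], [2, 1], scale=2.0)
print(np.round(T12, 3))  # identity rotation, translation (-2, -2) for this example
```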
/launch/create_map.launch:
--------------------------------------------------------------------------------
1 | <!-- create_map.launch: XML contents lost to extraction -->
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>semantic_visual_teach_repeat</name>
4 |   <version>0.0.0</version>
5 |   <description>The semantic_visual_teach_repeat package</description>
6 |
7 |   <!-- One maintainer tag required, multiple allowed, one person per tag -->
8 |   <!-- Example:  -->
9 |   <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
10 |   <maintainer email="mohammad@todo.todo">mohammad</maintainer>
11 |
12 |
13 |   <!-- One license tag required, multiple allowed, one license per tag -->
14 |   <!-- Commonly used license strings: -->
15 |   <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
16 |   <license>TODO</license>
17 |
18 |
19 |   <!-- Url tags are optional, but multiple are allowed, one per tag -->
20 |   <!-- Optional attribute type can be: website, bugtracker, or repository -->
21 |   <!-- Example: -->
22 |   <!-- <url type="website">http://wiki.ros.org/semantic_visual_teach_repeat</url> -->
23 |
24 |
25 |   <!-- Author tags are optional, multiple are allowed, one per tag -->
26 |   <!-- Authors do not have to be maintainers, but could be -->
27 |   <!-- Example: -->
28 |   <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
29 |
30 |
31 |   <!-- The *depend tags are used to specify dependencies -->
32 |   <!-- Dependencies can be catkin packages or system dependencies -->
33 |   <!-- Examples: -->
34 |   <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
35 |   <!--   <depend>roscpp</depend> -->
36 |   <!--   Note that this is equivalent to the following: -->
37 |   <!--     <build_depend>roscpp</build_depend> -->
38 |   <!--     <exec_depend>roscpp</exec_depend> -->
39 |   <!-- Use build_depend for packages you need at compile time: -->
40 |   <!--   <build_depend>message_generation</build_depend> -->
41 |   <!-- Use build_export_depend for packages you need in order to build against this package: -->
42 |   <!--   <build_export_depend>message_generation</build_export_depend> -->
43 |   <!-- Use buildtool_depend for build tool packages: -->
44 |   <!--   <buildtool_depend>catkin</buildtool_depend> -->
45 |   <!-- Use exec_depend for packages you need at runtime: -->
46 |   <!--   <exec_depend>message_runtime</exec_depend> -->
47 |   <!-- Use test_depend for packages you need only for testing: -->
48 |   <!--   <test_depend>gtest</test_depend> -->
49 |   <!-- Use doc_depend for packages you need only for building documentation: -->
50 |   <!--   <doc_depend>doxygen</doc_depend> -->
51 |   <build_depend>geometry_msgs</build_depend>
52 |   <build_depend>sensor_msgs</build_depend>
53 |   <build_depend>visualization_msgs</build_depend>
54 |   <build_depend>message_filters</build_depend>
55 |   <build_export_depend>geometry_msgs</build_export_depend>
56 |   <build_export_depend>sensor_msgs</build_export_depend>
57 |   <build_export_depend>visualization_msgs</build_export_depend>
58 |   <build_export_depend>message_filters</build_export_depend>
59 |   <exec_depend>geometry_msgs</exec_depend>
60 |   <exec_depend>sensor_msgs</exec_depend>
61 |   <exec_depend>visualization_msgs</exec_depend>
62 |   <exec_depend>message_filters</exec_depend>
63 |   <buildtool_depend>catkin</buildtool_depend>
64 |   <build_depend>roscpp</build_depend>
65 |   <build_depend>rospy</build_depend>
66 |   <build_depend>std_msgs</build_depend>
67 |   <build_export_depend>roscpp</build_export_depend>
68 |   <build_export_depend>rospy</build_export_depend>
69 |   <build_export_depend>std_msgs</build_export_depend>
70 |   <exec_depend>roscpp</exec_depend>
71 |   <exec_depend>rospy</exec_depend>
72 |   <exec_depend>std_msgs</exec_depend>
73 |
74 |
75 |   <!-- The export tag contains other, unspecified, tags -->
76 |   <export>
77 |     <!-- Other tools can request additional information be placed here -->
78 |
79 |   </export>
80 | </package>
81 |
--------------------------------------------------------------------------------
/semantic_mapping/map.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import rospy
3 | from darknet_ros_msgs.msg import BoundingBoxes
4 | from geometry_msgs.msg import PoseStamped,Point32
5 | from sensor_msgs.msg import PointCloud,ChannelFloat32,JointState
6 | import cv2
7 | import numpy as np
8 | import message_filters
9 | from numpy import cos,sin,pi
10 | import csv
11 | import os
12 |
13 | class mapping():
14 | def __init__(self):
15 | self.objs = []
16 | self.obj_map = []
17 | self.prev_objects = []
18 | # camera information
19 | self.im_size_x = 1280 #720 for habitat
20 | self.im_size_y = 720
21 | rospy.init_node('listener', anonymous=True)
22 | self.bb_sub = message_filters.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes)
23 | # self.cam_sub = message_filters.Subscriber('/keyframe_pose', PoseStamped)
24 | # self.cam_sub = message_filters.Subscriber('/orb_pose', PoseStamped)
25 | self.op_sub = message_filters.Subscriber('/obj_position', PointCloud)
26 | self.obj_pub = rospy.Publisher('/map_objects', PointCloud , queue_size=1)
27 | self.ts = message_filters.ApproximateTimeSynchronizer([self.bb_sub,self.op_sub], 10, 0.1)
28 | self.ts.registerCallback(self.gotdata)
29 | print("start")
30 |
31 | def gotdata(self,bb,op):
32 |
33 | # big_objs = [59,2,5,8,9,13,20,57,60,61,62,72,80]
34 | # medium_objs = [0,1,3,9,11,30,31,56,58,63,66,68,69,71,74,77]
35 | # small_objs = [14,15,16,24,25,26,27,28,35,38,39,40,41,42,43,
36 | # 44,45,46,47,48,49,54,55,64,65,67,70,73,75,76,79]
37 |
38 | big_objs = [12,13,14,19,20,22]
39 | medium_objs = [0,1,2,3,4,5,11,15,18,21,23]
40 | small_objs = [6,7,8,9,10,16,17]
41 |
42 | points = []
43 | object_position = op.points
44 | data_op = op.channels[0].values
45 | for i in range(len(op.points)):
46 | x_p = object_position[i].x
47 | y_p = object_position[i].y
48 | z_p = object_position[i].z
49 | id_p = data_op[5*i]
50 | xmin_p = data_op[5*i+1]
51 | xmax_p = data_op[5*i+2]
52 | ymin_p = data_op[5*i+3]
53 | ymax_p = data_op[5*i+4]
54 | if id_p == 23:
55 | print("clock at: "+ str([x_p,y_p,z_p]))
56 |
57 | details_obj = [x_p,y_p,z_p,id_p,0,xmin_p,xmax_p,ymin_p,ymax_p]
58 | points.append(details_obj)
59 | self.prev_objects = points
60 |
61 | if(len(self.objs)==0):
62 | self.objs = np.array(points)
63 | return 0
64 |
65 | else:
66 | for i in range(len(points)):
67 | matched = 0
68 | new_obj = np.array(points[i])
69 | cur_x_mid = (new_obj[5]+new_obj[6])/2
70 | cur_y_mid = (new_obj[7]+new_obj[8])/2
71 | for j in range(len(self.objs)):
72 | prev_obj = self.objs[j,:]
73 | prev_x_mid = (prev_obj[5]+prev_obj[6])/2
74 | prev_y_mid = (prev_obj[7]+prev_obj[8])/2
75 | if prev_obj[3] == new_obj[3]:
76 | dist = np.sqrt((prev_obj[0]-new_obj[0])**2 + (prev_obj[1]-new_obj[1])**2 + (prev_obj[2]-new_obj[2])**2)
77 | if dist<0.5:
78 | self.objs[j,0] = new_obj[0]
79 | self.objs[j,1] = new_obj[1]
80 | self.objs[j,2] = new_obj[2]
81 | self.objs[j,4] = self.objs[j,4] + 1
82 | matched = 1
83 |
84 | if matched == 0 and abs(cur_x_mid-prev_x_mid) < self.im_size_x/15 and abs(cur_y_mid-prev_y_mid) < self.im_size_y/15:
85 | print("found similar")
86 | self.objs[j,0] = new_obj[0]
87 | self.objs[j,1] = new_obj[1]
88 | self.objs[j,2] = new_obj[2]
89 | self.objs[j,4] = self.objs[j,4] + 1
90 | matched = 1
91 |
92 | if matched == 0:
93 | self.objs = np.vstack((self.objs,np.array(new_obj)))
94 | if(len(self.obj_map) == 0):
95 | my_obj = self.objs
96 | for i in range(len(self.objs)-1,-1,-1):
97 | if(self.objs[i,4]>2):
98 | self.obj_map.append(self.objs[i,:])
99 | self.objs = np.delete(self.objs,i,0)
100 |
101 | else:
102 | self.objs = np.array(self.objs)
103 | self.obj_map = np.array(self.obj_map)
104 | for i in range(len(self.objs)-1,-1,-1):
105 | if(self.objs[i,4]>2):
106 | all_matched = 0
107 | for j in range(len(self.obj_map)-1,-1,-1):
108 |
109 | if self.obj_map[j,3] == self.objs[i,3]:
110 | dist2 = np.sqrt((self.obj_map[j,0]-self.objs[i,0])**2 + (self.obj_map[j,1]-self.objs[i,1])**2 + (self.obj_map[j,2]-self.objs[i,2])**2)
111 | if self.obj_map[j,3] in big_objs:
112 | thresh = 1.5
113 | elif self.obj_map[j,3] in medium_objs:
114 | thresh = 0.75
115 | else:
116 | thresh = 0.5
117 |
118 | if dist2<thresh:
… | (remainder of map.py lost to extraction)
--------------------------------------------------------------------------------
/semantic_mapping/orb_obj.py:
--------------------------------------------------------------------------------
… | (lines 1-40 of orb_obj.py lost to extraction)
41 | bigger_than_one_mid = np.where(res_mid[2]>1)
42 | bigger_than_one_mid = bigger_than_one_mid[0]
43 | if len(bigger_than_one_mid)>0:
44 | for i in bigger_than_one_mid:
45 | delete_list_mid = np.where((points_cl_mid[:,0:3] == res_mid[0][i]).all(axis=1))
46 | delete_list_mid = delete_list_mid[0]
47 | delete_list_mid = delete_list_mid.tolist()
48 | delete_list_mid.reverse()
49 | for k in delete_list_mid:
50 | points_cl_mid = np.delete(points_cl_mid,k,0)
51 | return points_cl_mid
52 |
53 | def gotdata(self,bound,cam_pose,pc):
54 | # print("worked")
55 | points_cat = []
56 | points_cat_mid = []
57 | objects = []
58 | objects_info = []
59 |
60 | c=np.zeros(3)
61 | R=np.zeros(4)
62 |
63 | c[0] = cam_pose.pose.position.x
64 | c[1] = cam_pose.pose.position.y
65 | c[2] = cam_pose.pose.position.z
66 |
67 | R[0] = cam_pose.pose.orientation.x
68 | R[1] = cam_pose.pose.orientation.y
69 | R[2] = cam_pose.pose.orientation.z
70 | R[3] = cam_pose.pose.orientation.w
71 |
72 | with open('/home/mohammad/Desktop/projection.csv', mode='a') as proj_file:
73 | proj_writer = csv.writer(proj_file, delimiter=',')
74 | proj_writer.writerow([c[0],c[1],c[2],R[0],R[1],R[2],R[3]])
75 |
76 | K = self.K
77 | Rot = np.array([[1-2*(R[1]**2)-2*(R[2]**2),2*R[0]*R[1]+2*R[2]*R[3],2*R[0]*R[2]-2*R[1]*R[3],0],
78 | [2*R[0]*R[1]-2*R[2]*R[3],1-2*(R[0]**2)-2*(R[2]**2),2*R[1]*R[2]+2*R[0]*R[3],0],
79 | [2*R[0]*R[2]+2*R[1]*R[3],2*R[1]*R[2]-2*R[0]*R[3],1-2*(R[0]**2)-2*(R[1]**2),0],
80 | [0,0,0,1]])
81 |
82 | Proj = np.zeros((3,4))
83 | Proj[0:3,0:3] = Rot[0:3,0:3]
84 | tcw_ros = np.array([[-c[0]],[-c[1]],[-c[2]]])
85 | tcw_orb = np.dot(Rot[0:3,0:3],tcw_ros)
86 |
87 | Proj[0,3]=tcw_orb[0]
88 | Proj[1,3]=tcw_orb[1]
89 | Proj[2,3]=tcw_orb[2]
90 | cur_cam = cam_pose.pose.position
91 |
92 | self.Projection = np.dot(K,Proj)
93 | d=np.array([self.prev_cam[0]-cur_cam.x,self.prev_cam[1]-cur_cam.y,self.prev_cam[2]-cur_cam.z])
94 | self.prev_cam = np.array([cur_cam.x,cur_cam.y,cur_cam.z])
95 | geo_points = pc.points
96 | channels = pc.channels
97 | obj = 0
98 |
99 | for bb in bound.bounding_boxes:
100 | current_time = rospy.Time.now()
101 | ins = []
102 | xmin = bb.xmin
103 | xmax = bb.xmax
104 | ymin = bb.ymin
105 | ymax = bb.ymax
106 | Class = bb.Class
107 | id_obj = bb.id
108 | prob = bb.probability
109 | if Class == "None":
110 | obj = obj - 1
111 | continue
112 | obj = obj + 1
113 | xmid = (xmin+xmax)/2
114 | ymid = (ymin+ymax)/2
115 | print("I saw a "+Class)
116 | if xmin < self.im_size_x/25 or xmax > 24*self.im_size_x/25 or ymin < self.im_size_y/25:
117 | continue
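# (the test above skips boxes touching the left, right, or top 1/25 margin of
# the image, where a clipped detection gives an unreliable box centre)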
118 | print("considered "+Class)
119 |
120 | for i in range(len(geo_points)):
121 | fx = geo_points[i].x
122 | fy = geo_points[i].y
123 | fz = geo_points[i].z
124 | pt3d = np.array([[fx],[fy],[fz],[1]])
125 | pt2d = np.dot(self.Projection,pt3d)
126 | pt2d[0] = pt2d[0] / pt2d[2]
127 | pt2d[1] = pt2d[1] / pt2d[2]
128 | pt2d[2] = pt2d[2] / pt2d[2]
129 |
130 | if pt2d[0]<xmax and xmin<pt2d[0] and pt2d[1]<ymax and ymin<pt2d[1]:
131 | # pixel distance from the projection to the box centre (name reconstructed;
132 | # the original lines were lost in extraction)
133 | dist_mid = np.sqrt((pt2d[0]-xmid)**2 + (pt2d[1]-ymid)**2)
134 | ins.append([dist_mid,fx,fy,fz])
135 |
136 | ins = np.array(ins)
137 | if len(ins)>0:
138 | median_z = np.median(ins[:,3]) # z is forward
139 | for i in range(len(ins)-1,-1,-1):
140 | if (ins[i,3]-median_z) > 0.5:
141 | ins = np.delete(ins,i,axis=0)
142 |
143 | if len(ins) == 0:
144 | continue
145 | ins = ins[ins[:,0].argsort()]
146 | if len(ins)>3:
147 | closests = ins[0:3]
148 | else:
149 | closests = ins
150 | closests = np.array(closests)
151 |
152 | print("closests are\n"+str(closests))
153 |
154 | for j in range(len(closests)-1,0,-1):
155 | dc_v = [closests[j,1]-closests[0,1] ,closests[j,2]-closests[0,2], closests[j,3]-closests[0,3]]
156 | dc = np.linalg.norm(dc_v)
157 | if dc>1:
158 | closests = np.delete(closests,j,axis=0)
159 |
160 | for j in range(len(closests)-1,-1,-1):
161 | if closests[j,0] > 30:
162 | closests = np.delete(closests,j,axis=0)
163 | if len(closests) == 0:
164 | continue
165 |
166 | Xs = closests[:,1]
167 | Ys = closests[:,2]
168 | Zs = closests[:,3]
169 | X = np.mean(Xs)
170 | Y = np.mean(Ys)
171 | Z = np.mean(Zs)
172 | points = [X,Y,Z,Class,obj,id_obj,xmid,ymid,xmin,xmax,ymin,ymax]
173 | points_cat.append(points)
174 |
175 | points_cl = np.array(points_cat)
176 | for i in range(len(points_cl)):
177 | if(len(points_cl)==0):
178 | break
179 |
180 | print("saw a "+points_cl[i,3]+ " at: "+str([float(points_cl[i,0]),float(points_cl[i,1]),float(points_cl[i,2])]))
181 | object_position = Point32()
182 | object_position.x = float(points_cl[i,0])
183 | object_position.y = float(points_cl[i,1])
184 | object_position.z = float(points_cl[i,2])
185 | objects.append(object_position)
186 | objects_info.append(float(points_cl[i,5]))
187 | objects_info.append(float(points_cl[i,8]))
188 | objects_info.append(float(points_cl[i,9]))
189 | objects_info.append(float(points_cl[i,10]))
190 | objects_info.append(float(points_cl[i,11]))
191 | self.object_class.values = np.array(objects_info)
192 | self.object_class.name = "objects"
193 |
194 | self.founded_objects.points = objects
195 | self.founded_objects.channels = [self.object_class]
196 | self.founded_objects.header.stamp = rospy.Time.now()
197 | self.founded_objects.header.frame_id = "map"
198 | self.obj_pub.publish(self.founded_objects)
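# Each object contributes one Point32 plus five channel floats in fixed order:
# (id, xmin, xmax, ymin, ymax). A subscriber can unpack them like this
# (a sketch, assuming `msg` is the received sensor_msgs/PointCloud):
#
# info = np.array(msg.channels[0].values).reshape(-1, 5)
# for pt, (obj_id, xmin, xmax, ymin, ymax) in zip(msg.points, info):
#     ...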
199 |
200 | if __name__ == '__main__':
201 | path_file = '/home/mohammad/Desktop/projection.csv'
202 | if os.path.exists(path_file):
203 | os.remove(path_file)
204 | obj_position()
205 | rospy.spin()
206 |
--------------------------------------------------------------------------------
/src/show_map.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include "ros/ros.h"
3 | #include <geometry_msgs/PoseStamped.h>
4 | #include <geometry_msgs/PoseArray.h>
5 | #include <geometry_msgs/Point32.h>
6 | #include <sensor_msgs/PointCloud.h>
7 | #include <visualization_msgs/Marker.h>
8 | #include <visualization_msgs/MarkerArray.h>
9 | #include <tf/transform_broadcaster.h>
10 | #include "tf/transform_datatypes.h"
11 | #include <opencv2/opencv.hpp>
12 | #include <Eigen/Dense>
13 | //#include
14 | #include <vector>
15 | #include <string>
16 | #include <cmath>
17 | #include <cstdlib>
18 |
19 | using namespace std;
20 |
21 | class map_show{
22 |
23 | private:
24 | ros::NodeHandle n;
25 | ros::Subscriber Mo_sub;
26 | ros::Subscriber Kf_sub;
27 | ros::Subscriber NMo_sub;
28 | ros::Subscriber NKf_sub;
29 | ros::Subscriber NKf_nd_sub;
30 | ros::Subscriber teach_Mo_sub;
31 | ros::Subscriber teach_Kf_sub;
32 | ros::Subscriber goal_points_sub;
33 | // ros::Publisher marker_pub_cam;
34 | ros::Publisher marker_pub_obj;
35 | ros::Publisher marker_pub_cam_arr;
36 | ros::Publisher new_marker_pub_obj;
37 | ros::Publisher new_marker_pub_cam_arr;
38 | ros::Publisher new_marker_pub_cam_arr_nd;
39 | ros::Publisher teach_marker_pub_obj;
40 | ros::Publisher teach_marker_pub_cam_arr;
41 | ros::Publisher goal_points_pub;
42 |
43 | public:
44 | std::vector<geometry_msgs::Point32> all_objects;
45 | std::vector<geometry_msgs::Point32> new_all_objects;
46 | std::vector<geometry_msgs::Point32> teach_all_objects;
47 | std::vector<geometry_msgs::Point32> all_goal_points;
48 |
49 | float x_obj;
50 | float y_obj;
51 | float z_obj;
52 | float x_cam;
53 | float y_cam;
54 | float z_cam;
55 | float R[4];
56 |
57 | float nx_obj;
58 | float ny_obj;
59 | float nz_obj;
60 | float nx_cam;
61 | float ny_cam;
62 | float nz_cam;
63 | float NR[4];
64 |
65 | float nx_nd_cam;
66 | float ny_nd_cam;
67 | float nz_nd_cam;
68 | float NR_nd[4];
69 |
70 | float tx_obj;
71 | float ty_obj;
72 | float tz_obj;
73 | float tx_cam;
74 | float ty_cam;
75 | float tz_cam;
76 | float TR[4];
77 |
78 | float x_gp;
79 | float y_gp;
80 | float z_gp;
81 |
82 | std::vector<visualization_msgs::Marker> all_cam_markers;
83 | std::vector<visualization_msgs::Marker> new_all_cam_markers;
84 | std::vector<visualization_msgs::Marker> new_all_cam_markers_nodrift;
85 | std::vector<visualization_msgs::Marker> teach_cam_markers;
86 |
87 | // std::vector all_markers;
88 | visualization_msgs::MarkerArray Markerarr;
89 | visualization_msgs::MarkerArray Posesarr;
90 | visualization_msgs::MarkerArray New_Markerarr;
91 | visualization_msgs::MarkerArray New_Posesarr;
92 | visualization_msgs::MarkerArray New_Posesarr_nd;
93 | visualization_msgs::MarkerArray teach_Markerarr;
94 | visualization_msgs::MarkerArray teach_Posesarr;
95 | visualization_msgs::MarkerArray goal_pointsarr;
96 |
97 |
98 | geometry_msgs::PoseStamped pose;
99 | geometry_msgs::PoseStamped new_pose;
100 | geometry_msgs::PoseStamped teach_pose;
101 |
102 | float init_height = 1;
103 |
104 | map_show():
105 | n("~"){
106 |
107 |
108 | Mo_sub = n.subscribe("/map_objects",1,&map_show::Objects , this);
109 | Kf_sub = n.subscribe("/orb_pose",1,&map_show::Camera_poses , this);
110 | marker_pub_obj = n.advertise<visualization_msgs::MarkerArray>("objects_marker_array", 1);
111 | // marker_pub_cam = n.advertise("camera_pose_marker", 1);
112 | NMo_sub = n.subscribe("/new_map_objects",1,&map_show::New_Objects , this);
113 | NKf_sub = n.subscribe("/new_orb_pose",1,&map_show::New_Camera_poses , this);
114 | NKf_nd_sub = n.subscribe("/orb_pose_drifted",1,&map_show::New_Camera_poses_drifted , this);
115 |
116 | teach_Mo_sub = n.subscribe("/teach_map_objects",1,&map_show::teach_Objects , this);
117 | teach_Kf_sub = n.subscribe("/teach_orb_pose",1,&map_show::teach_Camera_poses , this);
118 |
119 | goal_points_sub = n.subscribe("/goal_points_pose",1,&map_show::goal_points , this);
120 |
121 | teach_marker_pub_obj = n.advertise<visualization_msgs::MarkerArray>("teach_objects_marker_array", 1);
122 | teach_marker_pub_cam_arr = n.advertise<visualization_msgs::MarkerArray>("teach_camera_marker_array", 1);
123 |
124 | goal_points_pub = n.advertise<visualization_msgs::MarkerArray>("goal_points_marker_array", 1);
125 | marker_pub_cam_arr = n.advertise<visualization_msgs::MarkerArray>("camera_pose_marker_array", 1);
126 | new_marker_pub_obj = n.advertise<visualization_msgs::MarkerArray>("new_objects_marker_array", 1);
127 | // marker_pub_cam = n.advertise("camera_pose_marker", 1);
128 | new_marker_pub_cam_arr = n.advertise<visualization_msgs::MarkerArray>("new_camera_pose_marker_array", 1);
129 | new_marker_pub_cam_arr_nd = n.advertise<visualization_msgs::MarkerArray>("camera_pose_marker_array_drifted", 1);
130 |
131 |
132 | }
133 | ~map_show(){}
134 |
135 | void Objects(const sensor_msgs::PointCloud::ConstPtr& msg1){
136 | all_objects = msg1->points;
137 | Markerarr.markers.resize(all_objects.size());
138 | for(size_t i=0 ; i<all_objects.size() ; i++){
139 | x_obj = all_objects[i].x;
140 | y_obj = all_objects[i].y;
141 | z_obj = all_objects[i].z;
142 | // (the lines that built OBJ_transformed were lost in extraction)
154 | float x_obj_tr = OBJ_transformed.at<float>(0,0);
155 | float y_obj_tr = OBJ_transformed.at<float>(1,0);
156 | float z_obj_tr = OBJ_transformed.at<float>(2,0);
157 |
158 | Markerarr.markers[i].header.stamp = ros::Time();
159 | Markerarr.markers[i].header.frame_id = "world";
160 | Markerarr.markers[i].ns = "object";
161 | Markerarr.markers[i].id = i;
162 | Markerarr.markers[i].type = visualization_msgs::Marker::SPHERE;
163 | Markerarr.markers[i].action = visualization_msgs::Marker::ADD;
164 | Markerarr.markers[i].pose.position.x = z_obj;
165 | Markerarr.markers[i].pose.position.y = -x_obj;
166 | Markerarr.markers[i].pose.position.z = -y_obj+init_height;
167 | Markerarr.markers[i].pose.orientation.x = 0;
168 | Markerarr.markers[i].pose.orientation.y = 0;
169 | Markerarr.markers[i].pose.orientation.z = 0;
170 | Markerarr.markers[i].pose.orientation.w = 0;
171 | Markerarr.markers[i].scale.x = 0.1;
172 | Markerarr.markers[i].scale.y = 0.1;
173 | Markerarr.markers[i].scale.z = 0.1;
174 | Markerarr.markers[i].color.r = 1.0f;
175 | Markerarr.markers[i].color.g = 0.0f;
176 | Markerarr.markers[i].color.b = 0.0f;
177 | Markerarr.markers[i].color.a = 1.0f;
178 | Markerarr.markers[i].lifetime = ros::Duration();
179 | }
180 | marker_pub_obj.publish(Markerarr);
181 | }
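// The hand-rolled remap above (x = z_obj, y = -x_obj, z = -y_obj + init_height)
// is the ORB-SLAM camera convention (z forward, x right, y down) re-expressed
// in the RViz world frame (x forward, y left, z up); it matches the orb_to_ros
// matrix used by TransformFromMat() below, plus a fixed height offset.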
182 |
183 | void New_Objects(const sensor_msgs::PointCloud::ConstPtr& msg3){
184 | new_all_objects = msg3->points;
185 | New_Markerarr.markers.resize(new_all_objects.size());
186 | for(size_t i=0 ; i<new_all_objects.size() ; i++){
187 | nx_obj = new_all_objects[i].x;
188 | ny_obj = new_all_objects[i].y;
189 | nz_obj = new_all_objects[i].z;
190 | // (the lines that built OBJ_transformed were lost in extraction)
202 | float nx_obj_tr = OBJ_transformed.at<float>(0,0);
203 | float ny_obj_tr = OBJ_transformed.at<float>(1,0);
204 | float nz_obj_tr = OBJ_transformed.at<float>(2,0);
205 |
206 | New_Markerarr.markers[i].header.stamp = ros::Time();
207 | New_Markerarr.markers[i].header.frame_id = "world";
208 | New_Markerarr.markers[i].ns = "object";
209 | New_Markerarr.markers[i].id = i;
210 | New_Markerarr.markers[i].type = visualization_msgs::Marker::CUBE;
211 | New_Markerarr.markers[i].action = visualization_msgs::Marker::ADD;
212 | New_Markerarr.markers[i].pose.position.x = nz_obj;
213 | New_Markerarr.markers[i].pose.position.y = -nx_obj;
214 | New_Markerarr.markers[i].pose.position.z = -ny_obj+init_height;
215 | New_Markerarr.markers[i].pose.orientation.x = 0;
216 | New_Markerarr.markers[i].pose.orientation.y = 0;
217 | New_Markerarr.markers[i].pose.orientation.z = 0;
218 | New_Markerarr.markers[i].pose.orientation.w = 0;
219 | New_Markerarr.markers[i].scale.x = 0.1;
220 | New_Markerarr.markers[i].scale.y = 0.1;
221 | New_Markerarr.markers[i].scale.z = 0.1;
222 | New_Markerarr.markers[i].color.r = 0.0f;
223 | New_Markerarr.markers[i].color.g = 0.0f;
224 | New_Markerarr.markers[i].color.b = 1.0f;
225 | New_Markerarr.markers[i].color.a = 1.0f;
226 | New_Markerarr.markers[i].lifetime = ros::Duration();
227 | }
228 | new_marker_pub_obj.publish(New_Markerarr);
229 | }
230 |
231 |
232 | void teach_Objects(const sensor_msgs::PointCloud::ConstPtr& msg5){
233 | teach_all_objects = msg5->points;
234 | teach_Markerarr.markers.resize(teach_all_objects.size());
235 | for(size_t i=0 ; i<teach_all_objects.size() ; i++){
236 | tx_obj = teach_all_objects[i].x;
237 | ty_obj = teach_all_objects[i].y;
238 | tz_obj = teach_all_objects[i].z;
239 | // (the lines that built OBJ_transformed were lost in extraction)
251 | float tx_obj_tr = OBJ_transformed.at<float>(0,0);
252 | float ty_obj_tr = OBJ_transformed.at<float>(1,0);
253 | float tz_obj_tr = OBJ_transformed.at<float>(2,0);
254 |
255 | teach_Markerarr.markers[i].header.stamp = ros::Time();
256 | teach_Markerarr.markers[i].header.frame_id = "world";
257 | teach_Markerarr.markers[i].ns = "object";
258 | teach_Markerarr.markers[i].id = i;
259 | teach_Markerarr.markers[i].type = visualization_msgs::Marker::CUBE;
260 | teach_Markerarr.markers[i].action = visualization_msgs::Marker::ADD;
261 | teach_Markerarr.markers[i].pose.position.x = tz_obj;
262 | teach_Markerarr.markers[i].pose.position.y = -tx_obj;
263 | teach_Markerarr.markers[i].pose.position.z = -ty_obj+init_height;
264 | teach_Markerarr.markers[i].pose.orientation.x = 0;
265 | teach_Markerarr.markers[i].pose.orientation.y = 0;
266 | teach_Markerarr.markers[i].pose.orientation.z = 0;
267 | teach_Markerarr.markers[i].pose.orientation.w = 0;
268 | teach_Markerarr.markers[i].scale.x = 0.1;
269 | teach_Markerarr.markers[i].scale.y = 0.1;
270 | teach_Markerarr.markers[i].scale.z = 0.1;
271 | teach_Markerarr.markers[i].color.r = 1.0f;
272 | teach_Markerarr.markers[i].color.g = 0.0f;
273 | teach_Markerarr.markers[i].color.b = 0.0f;
274 | teach_Markerarr.markers[i].color.a = 1.0f;
275 | teach_Markerarr.markers[i].lifetime = ros::Duration();
276 |
277 | }
278 | cout<<"hiii"<points;
284 | goal_pointsarr.markers.resize(all_goal_points.size());
285 | for(size_t i=0 ; i<all_goal_points.size() ; i++){
286 | x_gp = all_goal_points[i].x;
287 | y_gp = all_goal_points[i].y;
288 | z_gp = all_goal_points[i].z;
289 | // (the lines that built OBJ_transformed were lost in extraction)
303 | float tx_obj_tr = OBJ_transformed.at<float>(0,0);
304 | float ty_obj_tr = OBJ_transformed.at<float>(1,0);
305 | float tz_obj_tr = OBJ_transformed.at<float>(2,0);
306 |
307 | goal_pointsarr.markers[i].header.stamp = ros::Time();
308 | goal_pointsarr.markers[i].header.frame_id = "world";
309 | goal_pointsarr.markers[i].ns = "object";
310 | goal_pointsarr.markers[i].id = i;
311 | goal_pointsarr.markers[i].type = visualization_msgs::Marker::SPHERE;
312 | goal_pointsarr.markers[i].action = visualization_msgs::Marker::ADD;
313 | goal_pointsarr.markers[i].pose.position.x = z_gp;
314 | goal_pointsarr.markers[i].pose.position.y = -x_gp;
315 | goal_pointsarr.markers[i].pose.position.z = -y_gp+init_height;
316 | goal_pointsarr.markers[i].pose.orientation.x = 0;
317 | goal_pointsarr.markers[i].pose.orientation.y = 0;
318 | goal_pointsarr.markers[i].pose.orientation.z = 0;
319 | goal_pointsarr.markers[i].pose.orientation.w = 0;
320 | goal_pointsarr.markers[i].scale.x = 0.1;
321 | goal_pointsarr.markers[i].scale.y = 0.1;
322 | goal_pointsarr.markers[i].scale.z = 0.1;
323 | goal_pointsarr.markers[i].color.r = 1.0f;
324 | goal_pointsarr.markers[i].color.g = 0.0f;
325 | goal_pointsarr.markers[i].color.b = 1.0f;
326 | goal_pointsarr.markers[i].color.a = 1.0f;
327 | goal_pointsarr.markers[i].lifetime = ros::Duration();
328 | }
329 | goal_points_pub.publish(goal_pointsarr);
330 | }
331 |
332 | void Camera_poses(const geometry_msgs::PoseStamped::ConstPtr& msg2){
333 | x_cam = msg2->pose.position.x;
334 | y_cam = msg2->pose.position.y;
335 | z_cam = msg2->pose.position.z;
336 | // cout<<"cur cams are: "<pose.orientation.x;
339 | R[1] = msg2->pose.orientation.y;
340 | R[2] = msg2->pose.orientation.z;
341 | R[3] = msg2->pose.orientation.w;
342 |
343 | // cout<<"cams poses are: "< (0,0);
370 | proj[1][3] = tcw_orb.at (1,0);
371 | proj[2][3] = tcw_orb.at (2,0);
372 |
373 | cv::Mat Proj = cv::Mat(3, 4, CV_32FC1, &proj); //same as ORB_slam
374 | cv::Mat transform = TransformFromMat(Proj); //gets transformed to rviz format
375 | cv::Mat Rwc = transform.rowRange(0,3).colRange(0,3);
376 | cv::Mat twc = transform.rowRange(0,3).col(3);
377 | vector<float> q = toQuaternion(Rwc);
378 |
379 | tf::Transform new_transform;
380 | new_transform.setOrigin(tf::Vector3(twc.at<float>(0,0), twc.at<float>(1,0), twc.at<float>(2,0)));
381 |
382 | tf::Quaternion quaternion(q[0], q[1], q[2], q[3]);
383 | new_transform.setRotation(quaternion);
384 | tf::poseTFToMsg(new_transform, pose.pose);
385 |
386 | if (twc.at<float>(0,0) != 0){
387 | save_poses(twc.at<float>(0,0),twc.at<float>(1,0),twc.at<float>(2,0),q);
388 | }
389 | visualize_cam();
390 |
391 | }
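// Camera_poses() rebuilds the world-to-camera matrix T_cw = [R_cw | -R_cw*C]
// exactly as ORB-SLAM stores it; TransformFromMat() then inverts it and rotates
// it into ROS axes, so the arrow marker lands at the camera position in the
// RViz world frame.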
392 |
393 |
394 | void teach_Camera_poses(const geometry_msgs::PoseArray::ConstPtr& msg6){
395 | for(size_t i = 0;i<msg6->poses.size();i++){
396 | tx_cam = msg6->poses[i].position.x;
397 | ty_cam = msg6->poses[i].position.y;
398 | tz_cam = msg6->poses[i].position.z;
399 |
400 | TR[0] = msg6->poses[i].orientation.x;
401 | TR[1] = msg6->poses[i].orientation.y;
402 | TR[2] = msg6->poses[i].orientation.z;
403 | TR[3] = msg6->poses[i].orientation.w;
404 |
405 | float Rot[3][3] = {{1-2*(TR[1]*TR[1])-2*(TR[2]*TR[2]),2*TR[0]*TR[1]+2*TR[2]*TR[3],2*TR[0]*TR[2]-2*TR[1]*TR[3]},
406 | {2*TR[0]*TR[1]-2*TR[2]*TR[3],1-2*(TR[0]*TR[0])-2*(TR[2]*TR[2]),2*TR[1]*TR[2]+2*TR[0]*TR[3]},
407 | {2*TR[0]*TR[2]+2*TR[1]*TR[3],2*TR[1]*TR[2]-2*TR[0]*TR[3],1-2*(TR[0]*TR[0])-2*(TR[1]*TR[1])}};
408 |
409 | cv::Mat Rotation = cv::Mat(3, 3, CV_32FC1, &Rot);
410 | //cout<<"Rotation is: "< (0,0);
429 | proj[1][3] = tcw_orb.at (1,0);
430 | proj[2][3] = tcw_orb.at (2,0);
431 |
432 | cv::Mat Proj = cv::Mat(3, 4, CV_32FC1, &proj); //same as ORB_slam
433 | cv::Mat transform = TransformFromMat(Proj); //gets transformed to rviz format
434 | cv::Mat Rwc = transform.rowRange(0,3).colRange(0,3);
435 | cv::Mat twc = transform.rowRange(0,3).col(3);
436 | vector<float> q = toQuaternion(Rwc);
437 |
438 | tf::Transform new_transform;
439 | new_transform.setOrigin(tf::Vector3(twc.at<float>(0,0), twc.at<float>(1,0), twc.at<float>(2,0)));
440 |
441 | tf::Quaternion quaternion(q[0], q[1], q[2], q[3]);
442 | new_transform.setRotation(quaternion);
443 | tf::poseTFToMsg(new_transform, teach_pose.pose);
444 |
445 | if (twc.at<float>(0,0) != 0){
446 | teach_save_poses(twc.at<float>(0,0),twc.at<float>(1,0),twc.at<float>(2,0),q);
447 | }
448 |
449 | }
450 |
451 | teach_visualize_cam();
452 |
453 | }
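// Unlike the PoseStamped callbacks, the teach trajectory arrives as one
// geometry_msgs::PoseArray, so every stored pose is reprojected and the whole
// marker array is re-published on each message.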
454 |
455 |
456 | void New_Camera_poses(const geometry_msgs::PoseStamped::ConstPtr& msg4){
457 | nx_cam = msg4->pose.position.x;
458 | ny_cam = msg4->pose.position.y;
459 | nz_cam = msg4->pose.position.z;
460 |
461 | // cout<<"new cams are: "<pose.orientation.x;
464 | NR[1] = msg4->pose.orientation.y;
465 | NR[2] = msg4->pose.orientation.z;
466 | NR[3] = msg4->pose.orientation.w;
467 | // cout<<"new cams poses are: "< (0,0);
497 | proj[1][3] = tcw_orb.at (1,0);
498 | proj[2][3] = tcw_orb.at (2,0);
499 |
500 |
501 | cv::Mat Proj = cv::Mat(3, 4, CV_32FC1, &proj); //same as ORB_slam
502 | cv::Mat transform = TransformFromMat(Proj); //gets transformed to rviz format
503 | cv::Mat Rwc = transform.rowRange(0,3).colRange(0,3);
504 | cv::Mat twc = transform.rowRange(0,3).col(3);
505 | vector<float> nq = toQuaternion(Rwc);
506 |
507 | tf::Transform new_transform;
508 | new_transform.setOrigin(tf::Vector3(twc.at<float>(0,0), twc.at<float>(1,0), twc.at<float>(2,0)));
509 |
510 | tf::Quaternion quaternion(nq[0], nq[1], nq[2], nq[3]);
511 | new_transform.setRotation(quaternion);
512 | tf::poseTFToMsg(new_transform, new_pose.pose);
513 |
514 | if (twc.at<float>(0,0) != 0){
515 | new_save_poses(twc.at<float>(0,0),twc.at<float>(1,0),twc.at<float>(2,0),nq);
516 | }
517 | new_visualize_cam();
518 |
519 | }
520 |
521 | void New_Camera_poses_drifted(const geometry_msgs::PoseStamped::ConstPtr& msg8){
522 | nx_nd_cam = msg8->pose.position.x;
523 | ny_nd_cam = msg8->pose.position.y;
524 | nz_nd_cam = msg8->pose.position.z;
525 |
526 | // cout<<"new cams are: "<pose.orientation.x;
529 | NR_nd[1] = msg8->pose.orientation.y;
530 | NR_nd[2] = msg8->pose.orientation.z;
531 | NR_nd[3] = msg8->pose.orientation.w;
532 | // cout<<"new cams poses are: "< (0,0);
559 | proj[1][3] = tcw_orb.at (1,0);
560 | proj[2][3] = tcw_orb.at (2,0);
561 |
562 |
563 | cv::Mat Proj = cv::Mat(3, 4, CV_32FC1, &proj); //same as ORB_slam
564 | cv::Mat transform = TransformFromMat(Proj); //gets transformed to rviz format
565 | cv::Mat Rwc = transform.rowRange(0,3).colRange(0,3);
566 | cv::Mat twc = transform.rowRange(0,3).col(3);
567 | vector<float> nq = toQuaternion(Rwc);
568 |
569 | tf::Transform new_transform;
570 | new_transform.setOrigin(tf::Vector3(twc.at<float>(0,0), twc.at<float>(1,0), twc.at<float>(2,0)));
571 |
572 | tf::Quaternion quaternion(nq[0], nq[1], nq[2], nq[3]);
573 | new_transform.setRotation(quaternion);
574 | tf::poseTFToMsg(new_transform, new_pose.pose);
575 |
576 | if (twc.at<float>(0,0) != 0){
577 | new_save_poses_nodrift(twc.at<float>(0,0),twc.at<float>(1,0),twc.at<float>(2,0),nq);
578 | }
579 | new_visualize_cam_nodrift();
580 | }
581 |
582 |
583 | void save_poses(float x_cam, float y_cam, float z_cam, vector<float> q){
584 |
585 | visualization_msgs::Marker marker_cam;
586 | marker_cam.header.stamp = ros::Time();
587 | marker_cam.header.frame_id = "world";
588 | marker_cam.ns = "camera";
589 | marker_cam.id = 1;
590 | marker_cam.type = visualization_msgs::Marker::ARROW;
591 | marker_cam.action = visualization_msgs::Marker::ADD;
592 | // cout<<x_cam<<" "<<y_cam<<" "<<z_cam<<endl;
593 | marker_cam.pose.position.x = x_cam;
594 | marker_cam.pose.position.y = y_cam;
595 | marker_cam.pose.position.z = z_cam;
596 | marker_cam.pose.orientation.x = q[0];
597 | marker_cam.pose.orientation.y = q[1];
598 | marker_cam.pose.orientation.z = q[2];
599 | marker_cam.pose.orientation.w = q[3];
600 | // arrow size and colour below are assumed values; the originals were lost
601 | marker_cam.scale.x = 0.2;
602 | marker_cam.scale.y = 0.05;
603 | marker_cam.scale.z = 0.05;
604 | marker_cam.color.r = 0.0f;
605 | marker_cam.color.g = 1.0f;
606 | marker_cam.color.b = 0.0f;
607 | marker_cam.color.a = 1.0f;
608 | all_cam_markers.push_back(marker_cam);
609 | // if (all_cam_markers.size() > 1){
610 | // all_cam_markers.erase(all_cam_markers.begin());
611 | // }
612 |
613 | cout<<"size of cam poses are: "< q){
617 |
618 | visualization_msgs::Marker marker_cam;
619 | marker_cam.header.stamp = ros::Time();
620 | marker_cam.header.frame_id = "world";
621 | marker_cam.ns = "camera";
622 | marker_cam.id = 1;
623 | marker_cam.type = visualization_msgs::Marker::ARROW;
624 | marker_cam.action = visualization_msgs::Marker::ADD;
625 | // cout<<x_cam<<" "<<y_cam<<" "<<z_cam<<endl;
626 | marker_cam.pose.position.x = x_cam;
627 | marker_cam.pose.position.y = y_cam;
628 | marker_cam.pose.position.z = z_cam;
629 | marker_cam.pose.orientation.x = q[0];
630 | marker_cam.pose.orientation.y = q[1];
631 | marker_cam.pose.orientation.z = q[2];
632 | marker_cam.pose.orientation.w = q[3];
633 | // arrow size and colour below are assumed values; the originals were lost
634 | marker_cam.scale.x = 0.2;
635 | marker_cam.scale.y = 0.05;
636 | marker_cam.scale.z = 0.05;
637 | marker_cam.color.r = 1.0f;
638 | marker_cam.color.g = 0.0f;
639 | marker_cam.color.b = 0.0f;
640 | marker_cam.color.a = 1.0f;
641 | teach_cam_markers.push_back(marker_cam);
642 | }
643 |
647 | void new_save_poses(float x_cam, float y_cam, float z_cam, vector<float> q){
648 |
649 | visualization_msgs::Marker new_marker_cam;
650 | new_marker_cam.header.stamp = ros::Time();
651 | new_marker_cam.header.frame_id = "world";
652 | new_marker_cam.ns = "camera";
653 | new_marker_cam.id = 1;
654 | new_marker_cam.type = visualization_msgs::Marker::ARROW;
655 | new_marker_cam.action = visualization_msgs::Marker::ADD;
656 | // cout<<x_cam<<" "<<y_cam<<" "<<z_cam<<endl;
657 | new_marker_cam.pose.position.x = x_cam;
658 | new_marker_cam.pose.position.y = y_cam;
659 | new_marker_cam.pose.position.z = z_cam;
660 | new_marker_cam.pose.orientation.x = q[0];
661 | new_marker_cam.pose.orientation.y = q[1];
662 | new_marker_cam.pose.orientation.z = q[2];
663 | new_marker_cam.pose.orientation.w = q[3];
664 | // arrow size and colour below are assumed values; the originals were lost
665 | new_marker_cam.scale.x = 0.2;
666 | new_marker_cam.scale.y = 0.05;
667 | new_marker_cam.scale.z = 0.05;
668 | new_marker_cam.color.r = 0.0f;
669 | new_marker_cam.color.g = 0.0f;
670 | new_marker_cam.color.b = 1.0f;
671 | new_marker_cam.color.a = 1.0f;
672 | new_all_cam_markers.push_back(new_marker_cam);
673 |
674 | if (new_all_cam_markers.size() > 1){
675 | new_all_cam_markers.erase(new_all_cam_markers.begin());
676 | }
677 |
678 | }
679 |
680 | void new_save_poses_nodrift(float x_cam, float y_cam, float z_cam, vector q){
681 |
682 | visualization_msgs::Marker new_marker_cam;
683 | new_marker_cam.header.stamp = ros::Time();
684 | new_marker_cam.header.frame_id = "world";
685 | new_marker_cam.ns = "camera";
686 | new_marker_cam.id = 1;
687 | new_marker_cam.type = visualization_msgs::Marker::ARROW;
688 | new_marker_cam.action = visualization_msgs::Marker::ADD;
689 | // cout<<x_cam<<" "<<y_cam<<" "<<z_cam<<endl;
690 | new_marker_cam.pose.position.x = x_cam;
691 | new_marker_cam.pose.position.y = y_cam;
692 | new_marker_cam.pose.position.z = z_cam;
693 | new_marker_cam.pose.orientation.x = q[0];
694 | new_marker_cam.pose.orientation.y = q[1];
695 | new_marker_cam.pose.orientation.z = q[2];
696 | new_marker_cam.pose.orientation.w = q[3];
697 | // arrow size and colour below are assumed values; the originals were lost
698 | new_marker_cam.scale.x = 0.2;
699 | new_marker_cam.scale.y = 0.05;
700 | new_marker_cam.scale.z = 0.05;
701 | new_marker_cam.color.r = 1.0f;
702 | new_marker_cam.color.g = 1.0f;
703 | new_marker_cam.color.b = 0.0f;
704 | new_marker_cam.color.a = 1.0f;
705 | new_all_cam_markers_nodrift.push_back(new_marker_cam);
706 |
707 | if (new_all_cam_markers_nodrift.size() > 1){
708 | new_all_cam_markers_nodrift.erase(new_all_cam_markers_nodrift.begin());
709 | }
710 |
711 | }
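// Note the asymmetry between the save_poses() variants: save_poses() keeps
// every arrow (its erase block is commented out), while new_save_poses() and
// new_save_poses_nodrift() trim to the most recent marker, so only the first
// camera track accumulates a full trail in RViz.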
712 |
713 |
714 | cv::Mat TransformFromMat(cv::Mat position_mat) {
715 | cv::Mat rotation(3,3,CV_32F);
716 | cv::Mat translation(3,1,CV_32F);
717 |
718 | rotation = position_mat.rowRange(0,3).colRange(0,3);
719 | translation = position_mat.rowRange(0,3).col(3);
720 |
721 | float orb_to_ros[3][3] = {{0,0,1},
722 | {-1,0,0},
723 | {0,-1,0}};
724 |
725 |
726 | cv::Mat ORB_ROS = cv::Mat(3, 3, CV_32FC1, &orb_to_ros);
727 |
728 | //Transform from orb coordinate system to ros coordinate system on camera coordinates
729 | cv::Mat camera_rotation = ORB_ROS * ((ORB_ROS*rotation).t());
730 | cv::Mat camera_translation = -ORB_ROS * ((ORB_ROS*rotation).t())*(ORB_ROS*translation);
731 | cv::Mat Transform_matrix(3,4,CV_32F);
732 | cv::hconcat(camera_rotation, camera_translation, Transform_matrix);
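// With A = orb_to_ros and [R | t] = T_cw, the two lines above compute
//   camera_rotation    = A (A R)^T        = A R^T A^T
//   camera_translation = -A (A R)^T (A t) = A (-R^T t)
// i.e. the inverse pose T_wc conjugated into ROS axes: the rotation expressed
// in ROS coordinates, and the camera centre -R^T t remapped by A.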
733 |
734 | // cout<<Transform_matrix<<endl;
735 | return Transform_matrix;
736 | }
737 |
738 |
739 | std::vector<float> toQuaternion(const cv::Mat &M)
740 | {
741 | Eigen::Matrix<double,3,3> eigMat = toMatrix3d(M);
742 | Eigen::Quaterniond q(eigMat);
743 |
744 | std::vector<float> v(4);
745 | v[0] = q.x();
746 | v[1] = q.y();
747 | v[2] = q.z();
748 | v[3] = q.w();
749 |
750 | return v;
751 | }
752 |
753 | Eigen::Matrix<double,3,3> toMatrix3d(const cv::Mat &cvMat3)
754 | {
755 | Eigen::Matrix<double,3,3> M;
756 |
757 | M << cvMat3.at<float>(0,0), cvMat3.at<float>(0,1), cvMat3.at<float>(0,2),
758 | cvMat3.at<float>(1,0), cvMat3.at<float>(1,1), cvMat3.at<float>(1,2),
759 | cvMat3.at<float>(2,0), cvMat3.at<float>(2,1), cvMat3.at<float>(2,2);
760 |
761 | return M;
762 | }
763 |
764 | /*tf::Transform TransformFromMat (cv::Mat position_mat) {
765 | cv::Mat rotation(3,3,CV_32F);
766 | cv::Mat translation(3,1,CV_32F);
767 |
768 | rotation = position_mat.rowRange(0,3).colRange(0,3);
769 | translation = position_mat.rowRange(0,3).col(3);
770 |
771 | tf::Matrix3x3 tf_camera_rotation (rotation.at<float> (0,0), rotation.at<float> (0,1), rotation.at<float> (0,2),
772 | rotation.at<float> (1,0), rotation.at<float> (1,1), rotation.at<float> (1,2),
773 | rotation.at<float> (2,0), rotation.at<float> (2,1), rotation.at<float> (2,2)
774 | );
775 |
776 | tf::Vector3 tf_camera_translation (translation.at<float> (0), translation.at<float> (1), translation.at<float> (2));
777 |
778 | //Coordinate transformation matrix from orb coordinate system to ros coordinate system
779 | const tf::Matrix3x3 tf_orb_to_ros (0, 0, 1,
780 | -1, 0, 0,
781 | 0,-1, 0);
782 |
783 | //Transform from orb coordinate system to ros coordinate system on camera coordinates
784 | tf_camera_rotation = tf_orb_to_ros*tf_camera_rotation;
785 | tf_camera_translation = tf_orb_to_ros*tf_camera_translation;
786 |
787 | //Inverse matrix
788 | tf_camera_rotation = tf_camera_rotation.transpose();
789 | tf_camera_translation = -(tf_camera_rotation*tf_camera_translation);
790 |
791 | //Transform from orb coordinate system to ros coordinate system on map coordinates
792 | tf_camera_rotation = tf_orb_to_ros*tf_camera_rotation;
793 | tf_camera_translation = tf_orb_to_ros*tf_camera_translation;
794 |
795 | return tf::Transform (tf_camera_rotation, tf_camera_translation);
796 | }
797 | */
798 | /*
799 | void visualize_obj(float x_obj,float y_obj,float z_obj)
800 | {
801 | visualization_msgs::Marker marker_obj;
802 | marker_obj.header.stamp = ros::Time();
803 | marker_obj.header.frame_id = "map";
804 | marker_obj.ns = "object";
805 | marker_obj.id = 0;
806 | marker_obj.type = visualization_msgs::Marker::CUBE;
807 | marker_obj.action = visualization_msgs::Marker::ADD;
808 | marker_obj.pose.position.x = x_obj;
809 | marker_obj.pose.position.y = y_obj;
810 | marker_obj.pose.position.z = z_obj;
811 | marker_obj.pose.orientation.x = 0;
812 | marker_obj.pose.orientation.y = 0;
813 | marker_obj.pose.orientation.z = 0;
814 | marker_obj.pose.orientation.w = 0;
815 | marker_obj.scale.x = 0.5;
816 | marker_obj.scale.y = 0.5;
817 | marker_obj.scale.z = 0.5;
818 | marker_obj.color.r = 0.0f;
819 | marker_obj.color.g = 1.0f;
820 | marker_obj.color.b = 0.0f;
821 | marker_obj.color.a = 1.0f;
822 | marker_obj.lifetime = ros::Duration();
823 | all_markers.push_back(marker_obj);
824 | marker_pub.publish(all_markers);
825 | }
826 |
827 |
828 |
829 | void visualize_cam(float x_cam, float y_cam, float z_cam, vector<float> q)
830 | {
831 |
832 | visualization_msgs::Marker marker_cam;
833 | marker_cam.header.stamp = ros::Time();
834 | marker_cam.header.frame_id = "world";
835 | marker_cam.ns = "camera";
836 | marker_cam.id = 1;
837 | marker_cam.type = visualization_msgs::Marker::ARROW;
838 | marker_cam.action = visualization_msgs::Marker::ADD;
839 | // cout<