├── src
│   ├── CMakeLists.txt
│   ├── slam
│   │   ├── msg
│   │   │   └── RobotPosition.msg
│   │   ├── static
│   │   │   └── soccer_field.png
│   │   ├── launch
│   │   │   └── slam.launch
│   │   ├── srv
│   │   │   └── ToggleLocalization.srv
│   │   ├── package.xml
│   │   ├── src
│   │   │   ├── probability_map.py
│   │   │   ├── map_components.py
│   │   │   ├── particles.py
│   │   │   └── slam_node.py
│   │   └── CMakeLists.txt
│   ├── vision
│   │   ├── msg
│   │   │   ├── DetectedObstacle.msg
│   │   │   └── DetectedLines.msg
│   │   ├── launch
│   │   │   └── vision.launch
│   │   ├── config
│   │   │   ├── default_configuration.json
│   │   │   └── configuration.json
│   │   ├── src
│   │   │   ├── configuration.py
│   │   │   ├── obstacle_detection.py
│   │   │   ├── opencv_gui.py
│   │   │   ├── cv_mat.py
│   │   │   ├── field_object.py
│   │   │   ├── vision_node.py
│   │   │   └── line_segment_detection.py
│   │   ├── package.xml
│   │   └── CMakeLists.txt
│   └── navigation
│       ├── config
│       │   ├── walking_rate.json
│       │   └── walking_configurations.json
│       ├── launch
│       │   └── navigation.launch
│       ├── src
│       │   ├── obstacle.py
│       │   ├── op3_module.py
│       │   ├── head_control_module.py
│       │   ├── robot.py
│       │   ├── walking_module.py
│       │   └── navigation_node.py
│       ├── package.xml
│       └── CMakeLists.txt
├── .catkin_workspace
└── README.md
/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | /opt/ros/kinetic/share/catkin/cmake/toplevel.cmake
--------------------------------------------------------------------------------
/src/slam/msg/RobotPosition.msg:
--------------------------------------------------------------------------------
1 | int32 row
2 | int32 column
3 | int32 angle
--------------------------------------------------------------------------------
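For orientation, here is a minimal subscriber sketch for this message. The topic name is an assumption used only for illustration; the real topic is set up in `slam_node.py` and `navigation_node.py`, which are not shown here.

```python
#!/usr/bin/env python
# Minimal sketch: listen for the SLAM pose estimate.
# The topic name '/slam/robot_position' is hypothetical, used only for illustration.
import rospy
from slam.msg import RobotPosition


def on_pose(msg):
    rospy.loginfo('pose: row=%d column=%d angle=%d', msg.row, msg.column, msg.angle)


if __name__ == '__main__':
    rospy.init_node('robot_position_listener')
    rospy.Subscriber('/slam/robot_position', RobotPosition, on_pose)
    rospy.spin()
```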
/src/vision/msg/DetectedObstacle.msg:
--------------------------------------------------------------------------------
1 | bool is_left
2 | bool is_center
3 | bool is_right
4 | int64 area
--------------------------------------------------------------------------------
/src/slam/static/soccer_field.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luncf/online-slam/HEAD/src/slam/static/soccer_field.png
--------------------------------------------------------------------------------
/.catkin_workspace:
--------------------------------------------------------------------------------
1 | # This file currently only serves to mark the location of a catkin workspace for tool integration
2 |
--------------------------------------------------------------------------------
/src/navigation/config/walking_rate.json:
--------------------------------------------------------------------------------
1 | {
2 | "forward": 1.0,
3 | "turn_left": 10.0,
4 | "turn_right": 9.0
5 | }
--------------------------------------------------------------------------------
/src/vision/msg/DetectedLines.msg:
--------------------------------------------------------------------------------
1 | string center_line
2 | string goal_line
3 | string boundary_line
4 | string undefined_lines
--------------------------------------------------------------------------------
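The line fields are plain strings rather than arrays: `field_object.py` joins the individual line descriptions with `|` before publishing, so a subscriber has to split them back out. A small sketch (the description strings are illustrative; they are produced by `line_segment_detection.py`, which is not shown here):

```python
# Sketch: unpack one field of a DetectedLines message ('' means no lines of that type were seen).
def split_line_descriptions(field_value):
    return [description for description in field_value.split('|') if description]


print(split_line_descriptions('left|right'))  # -> ['left', 'right'] (illustrative descriptions)
print(split_line_descriptions(''))            # -> []
```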
/src/slam/launch/slam.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/src/slam/srv/ToggleLocalization.srv:
--------------------------------------------------------------------------------
1 | bool pause
2 | bool start
3 |
4 | bool place_left
5 | bool place_center
6 | bool place_right
7 |
8 | bool obstacle
9 | bool turn_left
10 | bool turn_right
11 |
12 | ---
13 |
14 | bool success
--------------------------------------------------------------------------------
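A sketch of calling this service from another node. The service name is an assumption used for illustration; the real name is registered in `slam_node.py`, which is not shown here.

```python
# Sketch: ask the SLAM node to (re)start localization with the robot placed at the centre point.
# '/slam/toggle_localization' is a hypothetical service name.
import rospy
from slam.srv import ToggleLocalization

rospy.init_node('toggle_localization_client')
rospy.wait_for_service('/slam/toggle_localization')
toggle = rospy.ServiceProxy('/slam/toggle_localization', ToggleLocalization)
response = toggle(pause=False, start=True,
                  place_left=False, place_center=True, place_right=False,
                  obstacle=False, turn_left=False, turn_right=False)
print(response.success)
```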
/src/vision/launch/vision.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/src/navigation/launch/navigation.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/src/navigation/src/obstacle.py:
--------------------------------------------------------------------------------
1 | class Obstacle(object):
2 |
3 | def __init__(self):
4 | self.is_on_left = False
5 | self.is_on_right = False
6 | self.is_center = False
7 |
8 | def update(self, msg):
9 | self.is_on_left = msg.is_left
10 | self.is_center = msg.is_center
11 | self.is_on_right = msg.is_right
--------------------------------------------------------------------------------
/src/vision/config/default_configuration.json:
--------------------------------------------------------------------------------
1 | {
2 | "obstacle": {
3 | "max_area": 5000,
4 | "min_area": 1000,
5 | "output_colour": [
6 | 0,
7 | 255,
8 | 0
9 | ],
10 | "threshold": [
11 | 0,
12 | 0,
13 | 0
14 | ],
15 | "value": [
16 | 0,
17 | 0,
18 | 0
19 | ]
20 | },
21 | "camera_index": 0,
22 | "field": {
23 | "min_area": 1500,
24 | "threshold": [
25 | 0,
26 | 0,
27 | 0
28 | ],
29 | "value": [
30 | 0,
31 | 0,
32 | 0
33 | ]
34 | },
35 | "lines": {
36 | "corner_max_distance_apart": 20,
37 | "max_distance_apart": 20,
38 | "max_width": 20,
39 | "min_length": 20,
40 | "output_boundary_line_colour": [
41 | 255,
42 | 0,
43 | 0
44 | ],
45 | "output_center_line_colour": [
46 | 255,
47 | 255,
48 | 0
49 | ],
50 | "output_goal_area_line_colour": [
51 | 255,
52 | 0,
53 | 255
54 | ],
55 | "output_undefined_line_colour": [
56 | 0,
57 | 0,
58 | 255
59 | ],
60 | "threshold": [
61 | 0,
62 | 0,
63 | 0
64 | ],
65 | "value": [
66 | 0,
67 | 0,
68 | 0
69 | ]
70 | },
71 | "resized_frame_height": 320
72 | }
--------------------------------------------------------------------------------
/src/vision/src/configuration.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import shutil
4 |
5 | import numpy as np
6 |
7 |
8 | class NumpyEncoder(json.JSONEncoder):
9 | # Encodes numpy arrays as lists since JSON does not support np.ndarray
10 | def default(self, obj):
11 | return obj.tolist() if isinstance(obj, np.ndarray) else json.JSONEncoder.default(self, obj)
12 |
13 |
14 | class Configuration(object):
15 |
16 | def __init__(self, configuration_directory):
17 | self.config = None
18 | self.file_path = os.path.join(configuration_directory, 'configuration.json')
19 |
20 | if not os.path.isfile(self.file_path):
21 | shutil.copy2(os.path.join(configuration_directory, 'default_configuration.json'), self.file_path)
22 |
23 | def load(self):
24 | # Load the configuration JSON file
25 | with open(self.file_path, 'r') as fp:
26 | self.config = json.load(fp)
27 |
28 | return self.config
29 |
30 | def save(self):
31 | # Save the configurations in a JSON file
32 | with open(self.file_path, 'w') as fp:
33 | json.dump(self.config, fp, sort_keys=True, indent=2, cls=NumpyEncoder)
34 |
35 | def update(self, field, value):
36 | # Update a value in the configuration
37 | self.config[field] = value
38 | self.save()
39 |
--------------------------------------------------------------------------------
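A short usage sketch for the `Configuration` class above; the directory path is illustrative (in the vision node it would point at the package's `config/` directory).

```python
# Sketch: load the vision configuration, adjust one value, and persist it.
from configuration import Configuration

config = Configuration('/path/to/vision/config')    # illustrative path
settings = config.load()
settings['obstacle']['min_area'] = 1200              # tweak a value in memory
config.update('obstacle', settings['obstacle'])      # writes configuration.json back to disk
```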
/src/navigation/src/op3_module.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from time import sleep
4 |
5 | import rospy
6 | from std_msgs.msg import String
7 |
8 | from robotis_controller_msgs.srv import SetJointModule
9 |
10 |
11 | class OP3Module(object):
12 |
13 | joint_list = [
14 | 'head_pan', 'head_tilt',
15 | 'l_sho_pitch', 'l_sho_roll', 'l_el', 'r_sho_pitch', 'r_sho_roll', 'r_el',
16 | 'l_hip_pitch', 'l_hip_roll', 'l_hip_yaw', 'l_knee', 'l_ank_pitch', 'l_ank_roll',
17 | 'r_hip_pitch', 'r_hip_roll', 'r_hip_yaw', 'r_knee', 'r_ank_pitch', 'r_ank_roll'
18 | ]
19 |
20 |
21 | def __init__(self, module_name):
22 | # Initialize OP3 ROS publishers
23 | self.module_name = module_name
24 | self.set_joint_module_client = rospy.ServiceProxy('/robotis/set_present_joint_ctrl_modules', SetJointModule)
25 |
26 | def enable(self, joints=joint_list):
27 | # Call service to enable module
28 | service_name = '/robotis/set_present_joint_ctrl_modules'
29 |
30 | rospy.wait_for_service(service_name)
31 |
32 | try:
33 | self.set_joint_module_client.call(joints, [self.module_name for _ in range(len(joints))])
34 | sleep(0.5)
35 | except rospy.ServiceException:
36 | rospy.logerr('Failed to call service: {0}'.format(service_name))
37 |
38 |
--------------------------------------------------------------------------------
/src/vision/config/configuration.json:
--------------------------------------------------------------------------------
1 | {
2 | "camera_index": 0,
3 | "field": {
4 | "min_area": 1500,
5 | "threshold": [
6 | 60,
7 | 10,
8 | 10
9 | ],
10 | "value": [
11 | 16,
12 | 115,
13 | 137
14 | ]
15 | },
16 | "flush_ratio": 0.25,
17 | "lines": {
18 | "corner_max_distance_apart": 25,
19 | "max_distance_apart": 25,
20 | "max_width": 50,
21 | "min_length": 30,
22 | "output_boundary_line_colour": [
23 | 255,
24 | 0,
25 | 0
26 | ],
27 | "output_center_line_colour": [
28 | 255,
29 | 255,
30 | 0
31 | ],
32 | "output_goal_area_line_colour": [
33 | 255,
34 | 0,
35 | 255
36 | ],
37 | "output_undefined_line_colour": [
38 | 0,
39 | 0,
40 | 255
41 | ],
42 | "threshold": [
43 | 50,
44 | 20,
45 | 20
46 | ],
47 | "value": [
48 | 138,
49 | 124,
50 | 128
51 | ]
52 | },
53 | "obstacle": {
54 | "max_area": 25000,
55 | "min_area": 8000,
56 | "output_colour": [
57 | 0,
58 | 255,
59 | 0
60 | ],
61 | "threshold": [
62 | 60,
63 | 20,
64 | 20
65 | ],
66 | "value": [
67 | 27,
68 | 159,
69 | 145
70 | ]
71 | },
72 | "resized_frame_height": 320
73 | }
--------------------------------------------------------------------------------
/src/navigation/config/walking_configurations.json:
--------------------------------------------------------------------------------
1 | {
2 | "init_x_offset": 0.005,
3 | "init_y_offset": 0.020,
4 | "init_z_offset": 0.025,
5 | "init_roll_offset": 0.0,
6 | "init_pitch_offset": 0.0,
7 | "init_yaw_offset": 0.0,
8 | "dsp_ratio": 0.20,
9 | "step_fb_ratio": 0.25,
10 | "move_aim_on": false,
11 | "balance_enable": false,
12 | "balance_hip_roll_gain": 0.35,
13 | "balance_knee_gain": 0.30,
14 | "balance_ankle_roll_gain": 0.70,
15 | "balance_ankle_pitch_gain": 0.90,
16 | "y_swap_amplitude": 0.010,
17 | "z_swap_amplitude": 0.003,
18 | "arm_swing_gain": 0.2,
19 | "pelvis_offset": 0.0,
20 | "hip_pitch_offset": 18.0,
21 | "p_gain": 32,
22 | "i_gain": 0,
23 | "d_gain": 0,
24 | "initial": {
25 | "x_move_amplitude": 0.000,
26 | "y_move_amplitude": 0.000,
27 | "z_move_amplitude": 0.020,
28 | "angle_move_amplitude": 0.0,
29 | "period_time": 0.500
30 | },
31 | "marching": {
32 | "x_move_amplitude": -0.001,
33 | "y_move_amplitude": 0.001,
34 | "z_move_amplitude": 0.030,
35 | "angle_move_amplitude": 0.0,
36 | "period_time": 0.580
37 | },
38 | "forward": {
39 | "x_move_amplitude": 0.006,
40 | "y_move_amplitude": 0.002,
41 | "z_move_amplitude": 0.040,
42 | "angle_move_amplitude": 0.0,
43 | "period_time": 0.580
44 | },
45 | "turn_left": {
46 | "x_move_amplitude": -0.001,
47 | "y_move_amplitude": 0.000,
48 | "z_move_amplitude": 0.040,
49 | "angle_move_amplitude": 3.0,
50 | "period_time": 0.580
51 | },
52 | "turn_right": {
53 | "x_move_amplitude": -0.001,
54 | "y_move_amplitude": 0.006,
55 | "z_move_amplitude": 0.040,
56 | "angle_move_amplitude": -3.0,
57 | "period_time": 0.580
58 | }
59 | }
--------------------------------------------------------------------------------
/src/navigation/src/head_control_module.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 |
3 | import json
4 | import os
5 | from time import sleep
6 |
7 | import rospy
8 | from std_msgs.msg import Header
9 | from sensor_msgs.msg import JointState
10 |
11 | from op3_module import OP3Module
12 |
13 | class HeadControlModule(OP3Module):
14 |
15 | def __init__(self):
16 | super(HeadControlModule, self).__init__('head_control_module')
17 |
18 | # Initialize OP3 ROS publishers and service clients
19 | self.set_joint_state_pub = rospy.Publisher('/robotis/head_control/set_joint_states', JointState, queue_size=0)
20 |
21 | def enable(self):
22 | return super(HeadControlModule, self).enable(joints=[joint for joint in OP3Module.joint_list if joint.startswith('head')])
23 |
24 | def __move_actuator(self, actuator_names, positions, velocity=0):
25 | header = Header()
26 | header.seq = 0
27 | header.stamp = rospy.Time.now()
28 | header.frame_id = ''
29 |
30 | message = JointState()
31 | message.header = header
32 | message.name = actuator_names
33 | message.position = positions
34 | message.velocity = [velocity for _ in range(len(actuator_names))]
35 | message.effort = [0 for _ in range((len(actuator_names)))]
36 |
37 | self.set_joint_state_pub.publish(message)
38 | sleep(1)
39 |
40 | def move_head(self, pan_position, tilt_position):
41 | self.__move_actuator(actuator_names=['head_pan', 'head_tilt'],
42 | positions=[pan_position, tilt_position])
43 |
44 | def look_up(self):
45 | self.move_head(pan_position=0, tilt_position=0)
46 |
47 | def look_down(self):
48 | self.move_head(pan_position=0, tilt_position=-0.7)
49 |
50 | def look_left(self):
51 | self.move_head(pan_position=0.75, tilt_position=-0.55)
52 |
53 | def look_right(self):
54 | self.move_head(pan_position=-0.75, tilt_position=-0.55)
55 |
--------------------------------------------------------------------------------
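A usage sketch for the head control module, assuming a ROS node has already been initialised with `rospy.init_node`:

```python
# Sketch: hand the head joints over to head_control_module and scan the surroundings.
from head_control_module import HeadControlModule

head = HeadControlModule()
head.enable()      # switches only the head_pan/head_tilt joints to this module
head.look_left()   # pan  0.75 rad, tilt -0.55 rad
head.look_right()  # pan -0.75 rad, tilt -0.55 rad
head.look_down()   # pan  0.00 rad, tilt -0.70 rad
```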
/src/vision/src/obstacle_detection.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import cv2 as cv
4 | import numpy as np
5 | import rospy
6 |
7 | from cv_mat import CVMat
8 |
9 |
10 | class ObstacleDetector(CVMat):
11 | RECTANGLE_RATIO = 1.618
12 |
13 | def __init__(self, frame):
14 | CVMat.__init__(self, frame=frame)
15 |
16 | def extract_obstacle(self, thresh_lb, thresh_ub):
17 | self.extract_object(thresh_lb=thresh_lb, thresh_ub=thresh_ub, field=None, kernel_size=3)
18 |
19 | def bounding_rect(self, min_area, max_area, frame_height, frame_width, flush_width):
20 | # Find contours
21 | _, contours, hierarchy = cv.findContours(self.frame, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_SIMPLE)
22 |
23 | if contours:
24 | positions = []
25 | position_text = ''
26 | contours = sorted(contours, key=cv.contourArea, reverse=True)
27 |
28 | for contour in contours:
29 | if min_area <= cv.contourArea(contour) <= max_area:
30 | x1, y1, w, h = cv.boundingRect(contour)
31 | x2, y2 = x1+w, y1+h
32 | mid_x, mid_y = ((x2 - x1) / 2.0) + x1, ((y2 - y1) / 2.0) + y1
33 | area = cv.contourArea(contour)
34 |
35 | # Check for direction between self and obstacle
36 | left_bound = (frame_width - 2*flush_width) * (1.0/4.0) + flush_width
37 | right_bound = (frame_width - 2*flush_width) * (3.0/4.0) + flush_width
38 |
39 | if mid_x <= left_bound:
40 | position_text += 'left_'
41 | elif mid_x > left_bound and mid_x <= right_bound:
42 | position_text += 'center_'
43 | elif mid_x > right_bound:
44 | position_text += 'right_'
45 |
46 | positions += [((x1, y1), (x2, y2))]
47 |
48 | obstacles = (positions, position_text, cv.contourArea(contours[0]))
49 | else:
50 | obstacles = (None, 'none', -1)
51 |
52 | return obstacles
53 |
--------------------------------------------------------------------------------
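The left/centre/right decision in `bounding_rect` splits the frame, minus the flushed margins, into quarters: the left quarter reports `left`, the right quarter reports `right`, and the middle half reports `center`. A worked example with illustrative numbers, assuming `flush_width` is `flush_ratio` times the frame width (which is how the `flush_ratio` entry in `configuration.json` appears to be used; the actual wiring is in `vision_node.py`, not shown here):

```python
# Worked example of the direction bounds used in bounding_rect().
frame_width, flush_width = 480, 120   # illustrative: 480 px wide frame, flush_ratio = 0.25
left_bound = (frame_width - 2 * flush_width) * (1.0 / 4.0) + flush_width    # 180.0
right_bound = (frame_width - 2 * flush_width) * (3.0 / 4.0) + flush_width   # 300.0
# An obstacle centred at x=170 is reported as 'left', at x=240 as 'center', at x=320 as 'right'.
```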
/src/navigation/src/robot.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | import json
3 |
4 | import rospy
5 |
6 | from slam.msg import RobotPosition
7 |
8 |
9 | class RobotMovement(Enum):
10 | OBSTACLE = 0
11 | FORWARD = 1
12 | TURN_LEFT = 2
13 | TURN_RIGHT = 3
14 |
15 |
16 | class Robot(object):
17 |
18 | def __init__(self, rate_config_path):
19 | self.row = -1
20 | self.column = -1
21 | self.angle = -1
22 |
23 | with open(rate_config_path) as config:
24 | rates = json.load(config)
25 | self.forward_rate = rates['forward']
26 | self.turn_left_rate = rates['turn_left']
27 | self.turn_right_rate = rates['turn_right']
28 |
29 | def update_pose(self, msg):
30 | self.row = msg.row
31 | self.column = msg.column
32 | self.angle = msg.angle
33 | rospy.loginfo('[Navigation] robot pose: row - {0:2d} column - {1:2d} angle - {2:1d}'. \
34 | format(self.row, self.column, self.angle))
35 |
36 | def next_movement(self, avoid_obstacle, num_map_rows, num_map_columns, bound_offset=2):
37 | movement = None
38 |
39 | at_top_bound = self.row <= bound_offset
40 | at_bottom_bound = self.row >= num_map_rows - 1 - bound_offset
41 | at_left_bound = self.column <= bound_offset
42 | at_right_bound = self.column >= num_map_columns - 1 - bound_offset
43 |
44 | top_field = self.row <= num_map_rows / 2
45 | bottom_field = self.row > num_map_rows / 2
46 | left_field = self.column <= num_map_columns / 2
47 | right_field = self.column > num_map_columns / 2
48 |
49 | if avoid_obstacle:
50 | movement = RobotMovement.OBSTACLE
51 |
52 | elif (self.angle == 0 and bottom_field and at_right_bound) or \
53 | (self.angle == 1 and right_field and at_top_bound) or \
54 | (self.angle == 2 and top_field and at_left_bound) or \
55 | (self.angle == 3 and left_field and at_bottom_bound):
56 | movement = RobotMovement.TURN_LEFT
57 |
58 | elif (self.angle == 0 and top_field and at_right_bound) or \
59 | (self.angle == 1 and left_field and at_top_bound) or \
60 | (self.angle == 2 and bottom_field and at_left_bound) or \
61 | (self.angle == 3 and right_field and at_bottom_bound):
62 | movement = RobotMovement.TURN_RIGHT
63 |
64 | else:
65 | movement = RobotMovement.FORWARD
66 |
67 | return movement
68 |
69 |
70 |
71 |
--------------------------------------------------------------------------------
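A short sketch of the movement decision above, using the 19-row by 28-column grid defined in `probability_map.py`; the configuration path is illustrative.

```python
# Sketch: a robot in the top half of the field, facing angle 0, that has reached the right bound.
from robot import Robot, RobotMovement

robot = Robot('/path/to/navigation/config/walking_rate.json')   # illustrative path
robot.row, robot.column, robot.angle = 9, 25, 0
movement = robot.next_movement(avoid_obstacle=False, num_map_rows=19, num_map_columns=28)
assert movement == RobotMovement.TURN_RIGHT   # angle 0 + top half + right bound -> turn right
```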
/src/vision/src/opencv_gui.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 |
4 | COLOUR_SPACE_CHANNELS = ('H', 'S', 'V')
5 | COLOUR_SPACE_MAX_VALUES = (255, 255, 255)
6 |
7 |
8 | DEBUG_WINDOW_NAME = 'debug'
9 | COLOUR_SPACE_THRESHOLD_TRACKBAR_NAME_FORMAT = '{0} threshold'
10 |
11 |
12 | def create_window(name, x=0, y=0):
13 | # Create a window with name at position (x, y)
14 | cv.namedWindow(name)
15 | cv.moveWindow(name, x, y)
16 |
17 |
18 | def create_trackbar(window_name, trackbar_name, default_value, max_value, callback):
19 | # Create a trackbar in window
20 | create_window(window_name)
21 | cv.createTrackbar(trackbar_name, window_name, default_value, max_value, callback)
22 |
23 |
24 | def set_trackbar_position(trackbar_name, window_name, position):
25 | # Set trackbar position
26 | cv.setTrackbarPos(trackbar_name, window_name, position)
27 |
28 |
29 | def set_mouse_cb(window_name, callback):
30 | # Set mouse callback
31 | create_window(window_name)
32 | cv.setMouseCallback(window_name, callback)
33 |
34 |
35 | def show(window_name, frame):
36 | # Show any window
37 | cv.imshow(window_name, frame)
38 |
39 |
40 | def show_debug_window(frame, text):
41 | # Show the debug window with the selected object name
42 | frame = cv.cvtColor(frame, code=cv.COLOR_GRAY2BGR)
43 | cv.putText(frame, text=text, org=(10, 30), fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 0, 255))
44 | cv.imshow(DEBUG_WINDOW_NAME, frame)
45 |
46 |
47 | def create_colour_space_threshold_trackbar(default_colour_space_threshold, callback):
48 | create_window(DEBUG_WINDOW_NAME)
49 |
50 | # Create the appropriate colour space threshold trackbars in the object window
51 | cv.createTrackbar(COLOUR_SPACE_THRESHOLD_TRACKBAR_NAME_FORMAT.format(COLOUR_SPACE_CHANNELS[0]),
52 | DEBUG_WINDOW_NAME, default_colour_space_threshold[0], COLOUR_SPACE_MAX_VALUES[0],
53 | lambda value: callback([value, -1, -1]))
54 | cv.createTrackbar(COLOUR_SPACE_THRESHOLD_TRACKBAR_NAME_FORMAT.format(COLOUR_SPACE_CHANNELS[1]),
55 | DEBUG_WINDOW_NAME, default_colour_space_threshold[1], COLOUR_SPACE_MAX_VALUES[1],
56 | lambda value: callback([-1, value, -1]))
57 | cv.createTrackbar(COLOUR_SPACE_THRESHOLD_TRACKBAR_NAME_FORMAT.format(COLOUR_SPACE_CHANNELS[2]),
58 | DEBUG_WINDOW_NAME, default_colour_space_threshold[2], COLOUR_SPACE_MAX_VALUES[2],
59 | lambda value: callback([-1, -1, value]))
60 |
61 |
62 | def set_colour_space_threshold_trackbar_position(colour_space_threshold_values):
63 | # Modify trackbar positions
64 | for index, channel in enumerate(COLOUR_SPACE_CHANNELS):
65 | set_trackbar_position(COLOUR_SPACE_THRESHOLD_TRACKBAR_NAME_FORMAT.format(channel),
66 | DEBUG_WINDOW_NAME, colour_space_threshold_values[index])
67 |
68 |
69 | def teardown():
70 | cv.destroyAllWindows()
71 |
--------------------------------------------------------------------------------
/src/navigation/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | navigation
4 | 0.0.0
5 | The navigation package
6 |
7 |
8 |
9 |
10 | andy
11 |
12 |
13 |
14 |
15 |
16 | TODO
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | catkin
52 | rospy
53 | std_msgs
54 | rospy
55 | std_msgs
56 | rospy
57 | std_msgs
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/src/slam/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | slam
4 | 0.0.0
5 | The slam package
6 |
7 |
8 |
9 |
10 | andy
11 |
12 |
13 |
14 |
15 |
16 | TODO
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | catkin
52 | rospy
53 | std_msgs
54 | message_generation
55 | rospy
56 | std_msgs
57 | rospy
58 | std_msgs
59 | message_runtime
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/src/vision/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | vision
4 | 0.0.0
5 | The vision package
6 |
7 |
8 |
9 |
10 | andy
11 |
12 |
13 |
14 |
15 |
16 | TODO
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | catkin
52 | rospy
53 | std_msgs
54 | message_generation
55 | rospy
56 | std_msgs
57 | rospy
58 | std_msgs
59 | message_runtime
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/src/vision/src/cv_mat.py:
--------------------------------------------------------------------------------
1 | import copy
2 |
3 | import cv2 as cv
4 | import numpy as np
5 |
6 |
7 | class CVMat(object):
8 |
9 | BLACK = (0, 0, 0)
10 | WHITE = (255, 255, 255)
11 |
12 | def __init__(self, frame, width=0, height=0):
13 | self.frame = self.resize(frame, width=width, height=height) if width > 0 or height > 0 else frame
14 | # self.frame = self.add_border()
15 | self.colour_space_frame = cv.cvtColor(self.frame, code=cv.COLOR_BGR2LAB)
16 |
17 | def clone(self):
18 | return copy.deepcopy(self)
19 |
20 | @staticmethod
21 | def resize(frame, width=0, height=0):
22 | # Resize an image based on the given width OR height
23 | # Will choose width if both width and height are given
24 |
25 | (_height, _width) = frame.shape[:2]
26 | ratio = _width / float(_height)
27 |
28 | if width > 0:
29 | height = int(round(width / ratio))
30 | elif height > 0:
31 | width = int(round(ratio * height))
32 | else:
33 | width, height = _width, _height
34 |
35 | return cv.resize(frame, (width, height), interpolation=cv.INTER_AREA)
36 |
37 | def add_border(self):
38 | # Add a black border around frame
39 | border_size = 10
40 | return cv.copyMakeBorder(self.frame, borderType=cv.BORDER_CONSTANT, value=[0, 0, 0],
41 | top=border_size, bottom=border_size, left=border_size, right=border_size)
42 |
43 | def background_mask(self, thresh_lb, thresh_ub, min_area, line_width=40):
44 | # Blur and threshold the original image
45 | self.frame = cv.GaussianBlur(self.colour_space_frame, ksize=(25, 25), sigmaX=0)
46 | self.frame = cv.inRange(self.frame, lowerb=thresh_lb, upperb=thresh_ub)
47 |
48 | # Remove any noise
49 | kernel_ellipse_3 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
50 | self.frame = cv.morphologyEx(self.frame, op=cv.MORPH_OPEN, kernel=kernel_ellipse_3)
51 | self.frame = cv.morphologyEx(self.frame, op=cv.MORPH_CLOSE, kernel=kernel_ellipse_3)
52 |
53 | # Find contours that fulfill the min_area requirement
54 | _, contours, _ = cv.findContours(self.frame, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_SIMPLE)
55 | contours = [contour for contour in contours if cv.contourArea(contour) > min_area] if contours else None
56 |
57 | # Create a blank frame and fill in the contours
58 | self.frame = np.zeros(self.frame.shape[:2], dtype=np.uint8)
59 | if contours:
60 | cv.fillPoly(self.frame, pts=contours, color=self.WHITE)
61 |
62 | # Morph close to fill in the lines
63 | self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE,
64 | cv.getStructuringElement(cv.MORPH_RECT, (line_width, line_width)))
65 |
66 | def extract_object(self, thresh_lb, thresh_ub, field=None, kernel_size=3):
67 | # Blur and threshold the original image
68 | blur = cv.GaussianBlur(self.colour_space_frame, ksize=(5, 5), sigmaX=0)
69 | threshold = cv.inRange(blur, lowerb=thresh_lb, upperb=thresh_ub)
70 |
71 | # Apply field mask if given
72 | self.frame = cv.bitwise_and(field, field, mask=threshold) if field is not None else threshold
73 |
74 | # Remove any noise
75 | kernel_ellipse = cv.getStructuringElement(cv.MORPH_ELLIPSE, (kernel_size, kernel_size))
76 | self.frame = cv.morphologyEx(self.frame, op=cv.MORPH_OPEN, kernel=kernel_ellipse)
77 | self.frame = cv.morphologyEx(self.frame, op=cv.MORPH_CLOSE, kernel=kernel_ellipse)
78 |
79 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Simultaneous Localization and Mapping
2 |
  3 | This is a navigation system for the robot that incorporates SLAM: the robot localizes itself and updates the obstacles on a pre-known map (a soccer field), using a **particle filter (probability)** method.
4 |
  5 | The vision module is from Assignment 1 with some minor bug fixes, and part of the navigation module is adapted from Assignment 2. The SLAM module tries to estimate the robot's position based on probability maps of the field features within its view.
6 |
7 |
8 | ## Getting Started
9 |
10 | ### Requirements
11 |
12 | This event has the following requirements:
13 |
14 | 1. OpenCV 3.2+
15 | 2. Python 2.7
 16 | 3. Darwin-OP 3 along with its ROS framework
17 |
18 | ### Features
19 |
20 | 1. Localization based on probability maps
21 | 2. Mapping predefined obstacles with vision
22 |
23 |
24 | ### How to run the project
25 |
26 | 1. Copy the files over to Darwin-OP 3 into `~/catkin_ws/src/`
27 | 2. Compile
28 | ```bash
 29 | cd ~/catkin_ws
30 | catkin_make
31 | ```
32 | 3. Use the ROS launch file to start the program
33 | ```bash
34 | roslaunch slam slam.launch
35 | ```
36 | 4. Press the `mode` button on the Darwin-OP 3 to reset
37 | 5. Press the `start` button on the Darwin-OP 3 to start the event
38 |
 39 | The vision module can be tuned via the GUI. To start, select the object to tune using the `Object` trackbar in the `debug` window. Set the average colour space value by selecting a range in the `camera` window, and adjust the threshold values via the other three trackbars.
40 |
41 | ### Configuration
42 |
43 | #### Vision module
44 |
45 | The vision module has a configuration file under `config/configuration.json`. Use this file to tune the vision based on the environment it is used in.
46 |
 47 | The configuration file gives the ability to change the settings of the
 48 | camera image and the detected features.
49 |
50 | Camera:
51 |
52 | - camera_index: the index of the video camera device (default: 0)
53 | - resized_frame_height: height of the resized frame, useful
54 | to reduce processing load (default: 320)
55 |
56 | Obstacles:
57 |
 58 | - min_area: minimum area of contours (default: 1000)
 59 | - max_area: maximum area of contours (default: 5000)
 60 | - output_colour: line colour of the object in the output frame (default: green)
 61 | - threshold: value added to calculate the min/max range of the colour space (default: [0, 0, 0])
62 | - value: average value of the selected range in the colour space (default: [0, 0, 0])
63 |
64 | Field:
65 |
66 | - min_area: smallest contour area to consider as part of the field (default: 1500)
67 | - threshold: value added to calculate the min/max range of the colour space (default: [0, 0, 0])
68 | - value: average value of the selected range in the colour space (default: [0, 0, 0])
69 |
70 | Lines:
71 |
 72 | - corner_max_distance_apart: maximum distance between lines to be considered a corner (default: 20)
 73 | - max_distance_apart: maximum distance between line segments to be considered one line (default: 20)
74 | - max_width: maximum width of each line (default: 20)
75 | - min_length: minimum length of each line segment (default: 20)
76 | - output_boundary_line_colour: line colour of the boundary lines in the output frame (default: blue)
77 | - output_center_line_colour: line colour of the center line in the output frame (default: cyan)
78 | - output_goal_area_line_colour: line colour of the goal area lines in the output frame (default: magenta)
 79 | - output_undefined_line_colour: line colour of any lines that cannot be classified (default: red)
80 | - threshold: value added to calculate the min/max range of the colour space (default: [0, 0, 0])
81 | - value: average value of the selected range in the colour space (default: [0, 0, 0])
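The `threshold` and `value` pairs above define a window in the converted colour-space frame built by `cv_mat.py`: the lower bound is `value - threshold` clamped at zero, and the upper bound is `value + threshold` (see `FieldObject.get_colour_space_bounds`). A quick worked example using the obstacle entry from `configuration.json`:

```python
# How a threshold/value pair becomes a colour-space window (mirrors FieldObject.get_colour_space_bounds).
value = [27, 159, 145]       # obstacle 'value' from configuration.json
threshold = [60, 20, 20]     # obstacle 'threshold' from configuration.json
lower = [max(0, v - t) for v, t in zip(value, threshold)]   # [0, 139, 125]
upper = [v + t for v, t in zip(value, threshold)]           # [87, 179, 165]
```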
82 |
83 | #### Navigation module
 84 | The navigation module keeps its configuration files under `config/`. Use them to tune the walk based on the environment it is in.
85 |
86 | - **walking_configurations.json**: This configuration file lists the walking gaits for each different type of walk. See the Darwin-OP 3 guide to understand each parameter.
87 |
88 | #### SLAM module
 89 | The walking rates used by the SLAM module are defined in `walking_rate.json`, under the navigation package's `config/` directory. Tune these rates to match the walking configuration of the navigation module.
90 |
91 |
92 | ## Future additions
93 | - [ ] Different movement based on obstacle location
94 | - [ ] Include lines seen in navigation decision
95 | - [ ] Better initial localization (currently only supports 3 points)
96 |
--------------------------------------------------------------------------------
/src/vision/src/field_object.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import rospy
4 | from vision.msg import DetectedLines, DetectedObstacle
5 |
6 | class FieldObject(object):
7 |
8 | def __init__(self, configuration, name):
9 | self.name = name
10 |
11 | self.threshold = np.array(configuration['threshold'])
12 | self.value = np.array(configuration['value'])
13 | self.lower_bound, self.upper_bound = self.get_colour_space_bounds()
14 |
15 | def get_colour_space_bounds(self):
16 | threshold_lower_bound = []
17 | threshold_upper_bound = []
18 |
19 | for index in range(0, len(self.value)):
20 | threshold_lower_bound += [self.round_int(self.value[index] - self.threshold[index])]
21 | threshold_upper_bound += [self.round_int(self.value[index] + self.threshold[index])]
22 |
23 | return np.array(threshold_lower_bound), np.array(threshold_upper_bound)
24 |
25 | def set_colour_space_value(self, values):
26 | self.value = np.array([self.round_int(value) for value in values])
27 | self.lower_bound, self.upper_bound = self.get_colour_space_bounds()
28 |
29 | def set_colour_space_threshold(self, threshold):
30 | self.threshold = np.array([self.round_int(value) if value >= 0 else self.threshold[index]
31 | for index, value in enumerate(threshold)])
32 | self.lower_bound, self.upper_bound = self.get_colour_space_bounds()
33 |
34 | def export_configuration(self):
35 | return {
36 | 'threshold': self.threshold, 'value': self.value,
37 | }
38 |
39 | @staticmethod
40 | def round_int(num):
41 | return max(0, int(round(num)))
42 |
43 |
44 | class Field(FieldObject):
45 |
46 | def __init__(self, configuration, name='field'):
47 | FieldObject.__init__(self, name=name, configuration=configuration)
48 | self.min_area = configuration['min_area']
49 |
50 | def export_configuration(self):
51 | configuration = super(Field, self).export_configuration()
52 | configuration['min_area'] = self.min_area
53 |
54 | return configuration
55 |
56 |
57 | class Lines(FieldObject):
58 |
59 | def __init__(self, configuration, name='lines'):
60 | FieldObject.__init__(self, name=name, configuration=configuration)
61 |
62 | self.pub = rospy.Publisher('/vision/' + name, DetectedLines, queue_size=1)
63 |
64 | self.max_distance_apart = configuration['max_distance_apart']
65 | self.min_length = configuration['min_length']
66 | self.max_width = configuration['max_width']
67 | self.corner_max_distance_apart = configuration['corner_max_distance_apart']
68 | self.output_center_colour = configuration["output_center_line_colour"]
69 | self.output_goal_area_colour = configuration["output_goal_area_line_colour"]
70 | self.output_boundary_colour = configuration["output_boundary_line_colour"]
71 | self.output_undefined_colour = configuration["output_undefined_line_colour"]
72 |
73 | def export_configuration(self):
74 | configuration = super(Lines, self).export_configuration()
75 |
76 | configuration['max_distance_apart'] = self.max_distance_apart
77 | configuration['min_length'] = self.min_length
78 | configuration['max_width'] = self.max_width
79 | configuration['corner_max_distance_apart'] = self.corner_max_distance_apart
80 | configuration["output_center_line_colour"] = self.output_center_colour
81 | configuration["output_goal_area_line_colour"] = self.output_goal_area_colour
82 | configuration["output_boundary_line_colour"] = self.output_boundary_colour
83 | configuration["output_undefined_line_colour"] = self.output_undefined_colour
84 |
85 | return configuration
86 |
87 | def publish_msg(self, lines):
88 | message = DetectedLines()
89 |
90 | message.boundary_line = '|'.join([line_description[1] for line_description in lines['boundary'][0]])
91 | message.goal_line = '|'.join([line_description[1] for line_description in lines['goal_area'][0]])
92 | message.center_line = '|'.join([line_description[1] for line_description in lines['center'][0]])
93 | message.undefined_lines = '|'.join([line_description[1] for line_description in lines['undefined'][0]])
94 |
95 | self.pub.publish(message)
96 |
97 |
98 | class Obstacle(FieldObject):
99 |
100 | def __init__(self, configuration, name='obstacle'):
101 | FieldObject.__init__(self, name=name, configuration=configuration)
102 |
103 | self.pub = rospy.Publisher('/vision/' + name, DetectedObstacle, queue_size=1)
104 |
105 | self.min_area = configuration['min_area']
106 | self.max_area = configuration['max_area']
107 |
108 | self.output_colour = configuration['output_colour']
109 |
110 | def export_configuration(self):
111 | configuration = super(Obstacle, self).export_configuration()
112 |
113 | configuration['min_area'] = self.min_area
114 | configuration['max_area'] = self.max_area
115 | configuration['output_colour'] = self.output_colour
116 |
117 | return configuration
118 |
119 | def publish_msg(self, position, area):
120 | position = position.lower()
121 |
122 | message = DetectedObstacle()
123 | message.is_left = 'left' in position
124 | message.is_center = 'center' in position
125 | message.is_right = 'right' in position
126 | message.area = area
127 |
128 | self.pub.publish(message)
129 |
--------------------------------------------------------------------------------
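The `Object` and threshold trackbars described in the README drive the setters above. A minimal sketch of that wiring, using the obstacle values from `configuration.json`; the actual wiring lives in `vision_node.py`, which is not reproduced here, so treat this as an assumption about how the pieces fit together.

```python
# Sketch: hook the debug-window trackbars up to a FieldObject's threshold.
import opencv_gui
from field_object import FieldObject

obstacle = FieldObject(configuration={'threshold': [60, 20, 20], 'value': [27, 159, 145]},
                       name='obstacle')

# Each trackbar callback passes a list with -1 for the untouched channels;
# set_colour_space_threshold keeps the previous value for those channels.
opencv_gui.create_colour_space_threshold_trackbar(
    [int(v) for v in obstacle.threshold], obstacle.set_colour_space_threshold)
```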
/src/slam/src/probability_map.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import numpy as np
4 |
5 |
6 | class ProbabilityMap(object):
7 |
8 | def __init__(self, regions, width=28, height=19):
9 | self.map = np.zeros((height, width), dtype=np.float32)
10 | for row, column in regions:
11 | self.map[row][column] = 1.0 / len(regions)
12 |
13 | self.regions = regions
14 |
15 | def normalize(self):
16 | total = np.sum(self.map)
17 | for (x, y), probability in np.ndenumerate(self.map):
18 | self.map[x][y] = probability / float(total)
19 |
20 |
21 | CENTER_LINE_FEATURE_PROBABILITY_MAP = {
22 | '0': ProbabilityMap(regions=[(5, 9), (6, 9), (7, 9), (8, 9), (9, 9), (10, 9), (11, 9), (12, 9), (13, 9)]),
23 | '90': ProbabilityMap(regions=[(15, 11), (15, 12), (15, 13), (15, 14), (15, 15), (15, 16)]),
24 | '180': ProbabilityMap(regions=[(5, 18), (6, 18), (7, 18), (8, 18), (9, 18), (10, 18), (11, 18), (12, 18), (13, 18)]),
25 | '270': ProbabilityMap(regions=[(3, 11), (3, 12), (3, 13), (3, 14), (3, 15), (3, 16)])
26 | }
27 |
28 | BOUNDARY_T_FEATURE_PROBABILITY_MAP = {
29 | '0': ProbabilityMap(regions=[(0, 9), (1, 9), (17, 9), (18, 9), (1, 22), (2, 22), (3, 22), (4, 22), (5, 22), (13, 22), (14, 22), (15, 22), (16, 22), (17, 22)]),
30 | '90': ProbabilityMap(regions=[(8, 0), (8, 1), (8, 2), (5, 11), (5, 12), (5, 13), (5, 14), (5, 15), (5, 16), (8, 25), (8, 26), (8, 27)]),
31 | '180': ProbabilityMap(regions=[(1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (13, 5), (14, 5), (15, 5), (16, 5), (17, 5), (0, 18), (1, 18), (17, 18), (18, 18)]),
32 | '270': ProbabilityMap(regions=[(10, 0), (10, 1), (10, 2), (13, 11), (13, 12), (13, 13), (13, 14), (13, 15), (13, 16), (10, 25), (10, 26), (10, 27)])
33 | }
34 |
35 | BOUNDARY_L_FEATURE_PROBABILITY_MAP = {
36 | '0': ProbabilityMap(regions=[(0, 22), (1, 22), (2, 22), (16, 22), (17, 22), (18, 22)]),
37 | '90': ProbabilityMap(regions=[(5, 0), (5, 1), (5, 2), (5, 25), (5, 26), (5, 27)]),
38 | '180': ProbabilityMap(regions=[(0, 5), (1, 5), (2, 5), (16, 5), (17, 5), (18, 5)]),
39 | '270': ProbabilityMap(regions=[(13, 0), (13, 1), (13, 2), (13, 25), (13, 26), (13, 27)])
40 | }
41 |
42 | GOAL_L_FEATURE_PROBABILITY_MAP = {
43 | '0': ProbabilityMap(regions=[(1, 19), (2, 19), (3, 19), (4, 19), (5, 19), (13, 19), (14, 19), (15, 19), (16, 19), (17, 19)]),
44 | '90': ProbabilityMap(regions=[(8, 1), (8, 2), (8, 3), (8, 4), (8, 5), (8, 22), (8, 23), (8, 25), (8, 26), (8, 27)]),
45 | '180': ProbabilityMap(regions=[(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (13, 8), (14, 8), (15, 8), (16, 8), (17, 8)]),
46 | '270': ProbabilityMap(regions=[(10, 1), (10, 2), (10, 3), (10, 4), (10, 5), (10, 22), (10, 23), (10, 25), (10, 26), (10, 27)])
47 | }
48 |
49 | BOUNDARY_L_GOAL_L_FEATURE_PROBABILITY_MAP = {
50 | '0': ProbabilityMap(regions=[(2, 22), (3, 22), (4, 22), (14, 22), (15, 22), (16, 22)]),
51 | '90': ProbabilityMap(regions=[(5, 2), (5, 3), (5, 4), (5, 23), (5, 24), (5, 25)]),
52 | '180': ProbabilityMap(regions=[(2, 5), (3, 5), (4, 5), (14, 5), (15, 5), (16, 5)]),
53 | '270': ProbabilityMap(regions=[(13, 2), (13, 3), (13, 4), (13, 23), (13, 24), (13, 25)])
54 | }
55 |
56 | PARALLEL_FEATURE_PROBABILITY_MAP = {
57 | '0': ProbabilityMap(regions=[(5, 22), (6, 22), (7, 22), (8, 22), (9, 22), (10, 22), (11, 22), (12, 22), (13, 22)]),
58 | '90': ProbabilityMap(regions=[]),
59 | '180': ProbabilityMap(regions=[(5, 5), (6, 5), (7, 5), (8, 5), (9, 5), (10, 5), (11, 5), (12, 5), (13, 5)]),
60 | '270': ProbabilityMap(regions=[])
61 | }
62 |
63 | BOUNDARY_T_GOAL_L_FEATURE_PROBABILITY_MAP = {
64 | '0': ProbabilityMap(regions=[(2, 22), (3, 22), (4, 22), (14, 22), (15, 22), (16, 22)]),
65 | '90': ProbabilityMap(regions=[(7, 1), (7, 2), (7, 25), (7, 26)]),
66 | '180': ProbabilityMap(regions=[(2, 5), (3, 5), (4, 5), (14, 5), (15, 5), (16, 5)]),
67 | '270': ProbabilityMap(regions=[(11, 1), (11, 2), (11, 25), (11, 26)])
68 | }
69 |
70 |
71 | # Zeros template
72 | # ProbabilityMap(map=np.array([
73 | # # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
74 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 0
75 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 1
76 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 2
77 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 3
78 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 4
79 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 5
80 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 6
81 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 7
82 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 8
83 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 9
84 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 10
85 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 11
86 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 12
87 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 13
88 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 14
89 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 15
90 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 16
91 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # 17
92 | # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # 18
93 | # ])
--------------------------------------------------------------------------------
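Each of the dictionaries above maps a robot heading (0, 90, 180 or 270 degrees) to a `ProbabilityMap` that spreads probability uniformly over the cells from which that field feature could be seen; how these maps are combined with the detected lines is handled in `particles.py` and `slam_node.py`, which are not shown here. A small sketch of the data structure itself:

```python
# Sketch: inspect one feature probability map on the 19x28 soccer-field grid.
from probability_map import CENTER_LINE_FEATURE_PROBABILITY_MAP

pm = CENTER_LINE_FEATURE_PROBABILITY_MAP['90']
print(pm.map.shape)      # (19, 28)
print(pm.map[15][11])    # 1/6: one of six cells consistent with seeing the centre line at 90 degrees
pm.normalize()           # probabilities still sum to 1.0
```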
/src/slam/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(slam)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 | rospy
12 | std_msgs
13 | message_generation
14 | )
15 |
16 | ## System dependencies are found with CMake's conventions
17 | # find_package(Boost REQUIRED COMPONENTS system)
18 |
19 |
20 | ## Uncomment this if the package has a setup.py. This macro ensures
21 | ## modules and global scripts declared therein get installed
22 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
23 | # catkin_python_setup()
24 |
25 | ################################################
26 | ## Declare ROS messages, services and actions ##
27 | ################################################
28 |
29 | ## To declare and build messages, services or actions from within this
30 | ## package, follow these steps:
31 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
32 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
33 | ## * In the file package.xml:
34 | ## * add a build_depend tag for "message_generation"
35 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
36 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
37 | ## but can be declared for certainty nonetheless:
38 | ## * add a exec_depend tag for "message_runtime"
39 | ## * In this file (CMakeLists.txt):
40 | ## * add "message_generation" and every package in MSG_DEP_SET to
41 | ## find_package(catkin REQUIRED COMPONENTS ...)
42 | ## * add "message_runtime" and every package in MSG_DEP_SET to
43 | ## catkin_package(CATKIN_DEPENDS ...)
44 | ## * uncomment the add_*_files sections below as needed
45 | ## and list every .msg/.srv/.action file to be processed
46 | ## * uncomment the generate_messages entry below
47 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
48 |
49 | ## Generate messages in the 'msg' folder
50 | add_message_files(
51 | FILES
52 | RobotPosition.msg
53 | )
54 |
55 | ## Generate services in the 'srv' folder
56 | add_service_files(
57 | FILES
58 | ToggleLocalization.srv
59 | )
60 |
61 | ## Generate actions in the 'action' folder
62 | # add_action_files(
63 | # FILES
64 | # Action1.action
65 | # Action2.action
66 | # )
67 |
68 | ## Generate added messages and services with any dependencies listed here
69 | generate_messages()
70 |
71 | ################################################
72 | ## Declare ROS dynamic reconfigure parameters ##
73 | ################################################
74 |
75 | ## To declare and build dynamic reconfigure parameters within this
76 | ## package, follow these steps:
77 | ## * In the file package.xml:
78 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
79 | ## * In this file (CMakeLists.txt):
80 | ## * add "dynamic_reconfigure" to
81 | ## find_package(catkin REQUIRED COMPONENTS ...)
82 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
83 | ## and list every .cfg file to be processed
84 |
85 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
86 | # generate_dynamic_reconfigure_options(
87 | # cfg/DynReconf1.cfg
88 | # cfg/DynReconf2.cfg
89 | # )
90 |
91 | ###################################
92 | ## catkin specific configuration ##
93 | ###################################
94 | ## The catkin_package macro generates cmake config files for your package
95 | ## Declare things to be passed to dependent projects
96 | ## INCLUDE_DIRS: uncomment this if your package contains header files
97 | ## LIBRARIES: libraries you create in this project that dependent projects also need
98 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
99 | ## DEPENDS: system dependencies of this project that dependent projects also need
100 | catkin_package(
101 | # INCLUDE_DIRS include
102 | # LIBRARIES slam
103 | # CATKIN_DEPENDS rospy std_msgs
104 | # DEPENDS system_lib
105 | )
106 |
107 | ###########
108 | ## Build ##
109 | ###########
110 |
111 | ## Specify additional locations of header files
112 | ## Your package locations should be listed before other locations
113 | include_directories(
114 | # include
115 | ${catkin_INCLUDE_DIRS}
116 | )
117 |
118 | ## Declare a C++ library
119 | # add_library(${PROJECT_NAME}
120 | # src/${PROJECT_NAME}/slam.cpp
121 | # )
122 |
123 | ## Add cmake target dependencies of the library
124 | ## as an example, code may need to be generated before libraries
125 | ## either from message generation or dynamic reconfigure
126 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
127 |
128 | ## Declare a C++ executable
129 | ## With catkin_make all packages are built within a single CMake context
130 | ## The recommended prefix ensures that target names across packages don't collide
131 | # add_executable(${PROJECT_NAME}_node src/slam_node.cpp)
132 |
133 | ## Rename C++ executable without prefix
134 | ## The above recommended prefix causes long target names, the following renames the
135 | ## target back to the shorter version for ease of user use
136 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
137 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
138 |
139 | ## Add cmake target dependencies of the executable
140 | ## same as for the library above
141 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
142 |
143 | ## Specify libraries to link a library or executable target against
144 | # target_link_libraries(${PROJECT_NAME}_node
145 | # ${catkin_LIBRARIES}
146 | # )
147 |
148 | #############
149 | ## Install ##
150 | #############
151 |
152 | # all install targets should use catkin DESTINATION variables
153 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
154 |
155 | ## Mark executable scripts (Python etc.) for installation
156 | ## in contrast to setup.py, you can choose the destination
157 | # install(PROGRAMS
158 | # scripts/my_python_script
159 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
160 | # )
161 |
162 | ## Mark executables and/or libraries for installation
163 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
164 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
165 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
166 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
167 | # )
168 |
169 | ## Mark cpp header files for installation
170 | # install(DIRECTORY include/${PROJECT_NAME}/
171 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
172 | # FILES_MATCHING PATTERN "*.h"
173 | # PATTERN ".svn" EXCLUDE
174 | # )
175 |
176 | ## Mark other files for installation (e.g. launch and bag files, etc.)
177 | # install(FILES
178 | # # myfile1
179 | # # myfile2
180 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
181 | # )
182 |
183 | #############
184 | ## Testing ##
185 | #############
186 |
187 | ## Add gtest based cpp test target and link libraries
188 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_slam.cpp)
189 | # if(TARGET ${PROJECT_NAME}-test)
190 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
191 | # endif()
192 |
193 | ## Add folders to be run by python nosetests
194 | # catkin_add_nosetests(test)
195 |
--------------------------------------------------------------------------------
/src/slam/src/map_components.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import cv2 as cv
4 | import numpy as np
5 | import rospy
6 |
7 |
8 | class MapComponent(object):
9 | MARKER_SIZE = 30
10 |
11 | def __init__(self, row=-1, column=-1):
12 | self.row = row
13 | self.column = column
14 |
15 | class TriangleMapComponent(MapComponent):
16 |
17 | # OpenCV mat 90 degrees = Cartesian 270 degrees
18 | ANGLES = [0, 3*math.pi/2, math.pi, math.pi/2]
19 |
20 | def __init__(self, row, column, angle, colour):
21 | super(TriangleMapComponent, self).__init__(row=row, column=column)
22 | self.angle = angle
23 |
24 | self.marker = np.array([(10, 0), (-7, -6), (-7, 6)], np.int32)
25 | self.colour = colour
26 |
27 |
28 | def draw(self, img, row_offset=0, column_offset=0, angle_offset=0):
29 | row = self.row + row_offset
30 | column = self.column + column_offset
31 | angle = self.ANGLES[self.angle] + angle_offset
32 |
33 | # Compute rotation matrix
34 | cos_angle, sin_angle = np.cos(angle), np.sin(angle)
35 | rotation_matrix = np.array(((cos_angle, sin_angle), (-sin_angle, cos_angle)))
36 |
37 | # Dot matrix to transform (rotate) robot
38 | pts = np.dot(self.marker, rotation_matrix).astype(np.int32).reshape((-1, 1, 2))
39 | pts = np.add(pts, np.array([(column * self.MARKER_SIZE) + self.MARKER_SIZE/2,
40 | (row * self.MARKER_SIZE) + self.MARKER_SIZE/2]).reshape(-1, 2))
41 |
42 | return cv.fillPoly(img, pts=[pts], color=self.colour)
43 |
44 |
45 | class Particle(TriangleMapComponent):
46 | def __init__(self, row, column, angle):
47 | super(Particle, self).__init__(row=row, column=column, angle=angle, colour=(0, 0, 0))
48 |
49 |
50 | class Robot(TriangleMapComponent):
51 |
52 | def __init__(self, angle=0):
53 | super(Robot, self).__init__(row=-1, column=-1, angle=angle, colour=(0, 0, 255))
54 |
55 | def set_init_position(self, x=-1, y=-1, turn_left=False, turn_right=False):
56 | if x != -1 and y != -1:
57 | self.row = y / self.MARKER_SIZE
58 | self.column = x / self.MARKER_SIZE
59 | elif turn_left:
60 | self.angle = (self.angle + 1) % len(self.ANGLES)
61 | elif turn_right:
62 | self.angle = (self.angle - 1) % len(self.ANGLES)
63 |
64 | def update_position(self, row, column, angle):
65 | # Move robot based on probability of best particle
66 | self.row = row
67 | self.column = column
68 | self.angle = angle
69 |
70 | def move_forward(self, steps, right_offset=0):
71 | if self.angle == 0:
72 | self.row += right_offset
73 | self.column += steps
74 | elif self.angle == 1:
75 | self.row -= steps
76 | self.column += right_offset
77 | elif self.angle == 2:
78 | self.row -= right_offset
79 | self.column -= steps
80 | elif self.angle == 3:
81 | self.row += steps
82 | self.column -= right_offset
83 |
84 | def turn_left(self):
85 | self.angle = (self.angle + 1) % 4
86 |
87 | def turn_right(self):
88 | self.angle = (self.angle - 1) % 4
89 |
90 | def draw_turn_left45(self, img):
91 | return self.draw(img=img, angle_offset=-math.pi/4)
92 |
93 | def draw_turn_right45(self, img):
94 | return self.draw(img=img, angle_offset=math.pi/4)
95 |
96 | # def draw_left_offset(self, img):
97 | # robot_map = None
98 |
99 | # if self.angle == 0:
100 | # robot_map = self.draw(img=img, row_offset=-1)
101 | # elif self.angle == 1:
102 | # robot_map = self.draw(img=img, column_offset=-1)
103 | # elif self.angle == 2:
104 | # robot_map = self.draw(img=img, row_offset=1)
105 | # elif self.angle == 3:
106 | # robot_map = self.draw(img=img, column_offset=1)
107 |
108 | # return robot_map
109 |
110 | # def draw_right_offset(self, img):
111 | # robot_map = None
112 |
113 | # if self.angle == 0:
114 | # robot_map = self.draw(img=img, row_offset=1)
115 | # elif self.angle == 1:
116 | # robot_map = self.draw(img=img, column_offset=1)
117 | # elif self.angle == 2:
118 | # robot_map = self.draw(img=img, row_offset=-1)
119 | # elif self.angle == 3:
120 | # robot_map = self.draw(img=img, column_offset=-1)
121 |
122 | # return robot_map
123 |
124 |
125 | class Obstacles(object):
126 |
127 | def __init__(self, num_map_rows, num_map_columns):
128 | self.obstacles = []
129 | self.colour = (255, 0, 255)
130 | self.num_map_rows = num_map_rows
131 | self.num_map_columns = num_map_columns
132 |
133 | def add_obstacle(self, row, column, angle, offset=1, grid_size=3, is_left=False, is_center=False, is_right=False):
134 | obstacle_row, obstacle_column = self.robot_pose_to_grid_origin(row=row, column=column, angle=angle,
135 | offset=offset, grid_size=grid_size)
136 |
137 | if is_left:
138 | if angle == 0:
139 | obstacle_row -= 1
140 | elif angle == 1:
141 | obstacle_column -= 1
142 | elif angle == 2:
143 | obstacle_row += 1
144 | elif angle == 3:
145 | obstacle_column += 1
146 | elif is_right:
147 | if angle == 0:
148 | obstacle_row += 1
149 | elif angle == 1:
150 | obstacle_column += 1
151 | elif angle == 2:
152 | obstacle_row -= 1
153 | elif angle == 3:
154 | obstacle_column -= 1
155 |
156 | if 0 <= obstacle_row < self.num_map_rows and 0 <= obstacle_column < self.num_map_columns:
157 | self.obstacles += [MapComponent(row=obstacle_row, column=obstacle_column)]
158 |
159 | def update_obstacle_list(self, row, column, angle, offset=2, grid_size=3):
160 | grid_row, grid_column = self.robot_pose_to_grid_origin(row=row, column=column, angle=angle,
161 | offset=offset, grid_size=grid_size)
162 | grid = [(grid_row + row_idx, grid_column + column_idx) for row_idx in range(-1, 2) for column_idx in range(-1, 2)]
163 |
164 | for obstacle in list(self.obstacles):
165 | if (obstacle.row, obstacle.column) in grid:
166 | self.obstacles.remove(obstacle)
167 |
168 | def robot_pose_to_grid_origin(self, row, column, angle, offset, grid_size):
169 | if grid_size % 2 == 0:
170 | rospy.logerr('Obstacle mapping: grid_size must be an odd number.')
171 |
172 | offset = offset + grid_size/2 + 1
173 | if angle == 0:
174 | column += offset
175 | elif angle == 1:
176 | row -= offset
177 | elif angle == 2:
178 | column -= offset
179 | elif angle == 3:
180 | row += offset
181 |
182 | return row, column
183 |
184 | def draw(self, img):
185 | for obstacle in self.obstacles:
186 | img = cv.circle(img=img, radius=10, color=self.colour, thickness=-1,
187 | center=((obstacle.column * obstacle.MARKER_SIZE) + obstacle.MARKER_SIZE/2,
188 | (obstacle.row * obstacle.MARKER_SIZE) + obstacle.MARKER_SIZE/2))
189 |
190 | return img
191 |
--------------------------------------------------------------------------------
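Note: the Obstacles class above books detected obstacles a few cells ahead of the robot's discrete pose (angle index 0-3). The sketch below (pose_to_grid_origin is an illustrative helper, not part of map_components.py) reproduces that offset arithmetic so it can be checked in isolation.

# Minimal sketch, assuming the same 0-3 angle convention as map_components.py
# (0: +column, 1: -row, 2: -column, 3: +row).
def pose_to_grid_origin(row, column, angle, offset=1, grid_size=3):
    # Shift by the sensing offset plus half the (odd) grid size, mirroring
    # Obstacles.robot_pose_to_grid_origin.
    shift = offset + grid_size // 2 + 1
    if angle == 0:
        column += shift
    elif angle == 1:
        row -= shift
    elif angle == 2:
        column -= shift
    elif angle == 3:
        row += shift
    return row, column

# A robot at (10, 5) facing angle 1 (decreasing rows) books obstacles three
# cells ahead, at (7, 5).
print(pose_to_grid_origin(row=10, column=5, angle=1))

--------------------------------------------------------------------------------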
/src/vision/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(vision)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 | rospy
12 | std_msgs
13 | message_generation
14 | )
15 |
16 | ## System dependencies are found with CMake's conventions
17 | # find_package(Boost REQUIRED COMPONENTS system)
18 |
19 |
20 | ## Uncomment this if the package has a setup.py. This macro ensures
21 | ## modules and global scripts declared therein get installed
22 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
23 | # catkin_python_setup()
24 |
25 | ################################################
26 | ## Declare ROS messages, services and actions ##
27 | ################################################
28 |
29 | ## To declare and build messages, services or actions from within this
30 | ## package, follow these steps:
31 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
32 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
33 | ## * In the file package.xml:
34 | ## * add a build_depend tag for "message_generation"
35 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
36 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
37 | ## but can be declared for certainty nonetheless:
38 | ## * add a exec_depend tag for "message_runtime"
39 | ## * In this file (CMakeLists.txt):
40 | ## * add "message_generation" and every package in MSG_DEP_SET to
41 | ## find_package(catkin REQUIRED COMPONENTS ...)
42 | ## * add "message_runtime" and every package in MSG_DEP_SET to
43 | ## catkin_package(CATKIN_DEPENDS ...)
44 | ## * uncomment the add_*_files sections below as needed
45 | ## and list every .msg/.srv/.action file to be processed
46 | ## * uncomment the generate_messages entry below
47 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
48 |
49 | ## Generate messages in the 'msg' folder
50 | add_message_files(
51 | FILES
52 | DetectedLines.msg
53 | DetectedObstacle.msg
54 | )
55 |
56 | ## Generate services in the 'srv' folder
57 | # add_service_files(
58 | # FILES
59 | # Service1.srv
60 | # Service2.srv
61 | # )
62 |
63 | ## Generate actions in the 'action' folder
64 | # add_action_files(
65 | # FILES
66 | # Action1.action
67 | # Action2.action
68 | # )
69 |
70 | ## Generate added messages and services with any dependencies listed here
71 | generate_messages()
72 |
73 | ################################################
74 | ## Declare ROS dynamic reconfigure parameters ##
75 | ################################################
76 |
77 | ## To declare and build dynamic reconfigure parameters within this
78 | ## package, follow these steps:
79 | ## * In the file package.xml:
80 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
81 | ## * In this file (CMakeLists.txt):
82 | ## * add "dynamic_reconfigure" to
83 | ## find_package(catkin REQUIRED COMPONENTS ...)
84 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
85 | ## and list every .cfg file to be processed
86 |
87 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
88 | # generate_dynamic_reconfigure_options(
89 | # cfg/DynReconf1.cfg
90 | # cfg/DynReconf2.cfg
91 | # )
92 |
93 | ###################################
94 | ## catkin specific configuration ##
95 | ###################################
96 | ## The catkin_package macro generates cmake config files for your package
97 | ## Declare things to be passed to dependent projects
98 | ## INCLUDE_DIRS: uncomment this if your package contains header files
99 | ## LIBRARIES: libraries you create in this project that dependent projects also need
100 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
101 | ## DEPENDS: system dependencies of this project that dependent projects also need
102 | catkin_package(
103 | # INCLUDE_DIRS include
104 | # LIBRARIES vision
105 | CATKIN_DEPENDS rospy std_msgs message_runtime
106 | # DEPENDS system_lib
107 | )
108 |
109 | ###########
110 | ## Build ##
111 | ###########
112 |
113 | ## Specify additional locations of header files
114 | ## Your package locations should be listed before other locations
115 | include_directories(
116 | # include
117 | ${catkin_INCLUDE_DIRS}
118 | )
119 |
120 | ## Declare a C++ library
121 | # add_library(${PROJECT_NAME}
122 | # src/${PROJECT_NAME}/vision.cpp
123 | # )
124 |
125 | ## Add cmake target dependencies of the library
126 | ## as an example, code may need to be generated before libraries
127 | ## either from message generation or dynamic reconfigure
128 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
129 |
130 | ## Declare a C++ executable
131 | ## With catkin_make all packages are built within a single CMake context
132 | ## The recommended prefix ensures that target names across packages don't collide
133 | # add_executable(${PROJECT_NAME}_node src/vision_node.cpp)
134 |
135 | ## Rename C++ executable without prefix
136 | ## The above recommended prefix causes long target names, the following renames the
137 | ## target back to the shorter version for ease of user use
138 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
139 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
140 |
141 | ## Add cmake target dependencies of the executable
142 | ## same as for the library above
143 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
144 |
145 | ## Specify libraries to link a library or executable target against
146 | # target_link_libraries(${PROJECT_NAME}_node
147 | # ${catkin_LIBRARIES}
148 | # )
149 |
150 | #############
151 | ## Install ##
152 | #############
153 |
154 | # all install targets should use catkin DESTINATION variables
155 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
156 |
157 | ## Mark executable scripts (Python etc.) for installation
158 | ## in contrast to setup.py, you can choose the destination
159 | # install(PROGRAMS
160 | # scripts/my_python_script
161 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
162 | # )
163 |
164 | ## Mark executables and/or libraries for installation
165 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
166 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
167 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
168 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
169 | # )
170 |
171 | ## Mark cpp header files for installation
172 | # install(DIRECTORY include/${PROJECT_NAME}/
173 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
174 | # FILES_MATCHING PATTERN "*.h"
175 | # PATTERN ".svn" EXCLUDE
176 | # )
177 |
178 | ## Mark other files for installation (e.g. launch and bag files, etc.)
179 | # install(FILES
180 | # # myfile1
181 | # # myfile2
182 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
183 | # )
184 |
185 | #############
186 | ## Testing ##
187 | #############
188 |
189 | ## Add gtest based cpp test target and link libraries
190 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_vision.cpp)
191 | # if(TARGET ${PROJECT_NAME}-test)
192 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
193 | # endif()
194 |
195 | ## Add folders to be run by python nosetests
196 | # catkin_add_nosetests(test)
197 |
--------------------------------------------------------------------------------
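Note: the add_message_files/generate_messages calls above are what make DetectedLines and DetectedObstacle importable from Python. A minimal publisher sketch for the generated type follows; the topic name matches the one subscribed to in navigation_node.py and slam_node.py, while the node name and the published values are illustrative only.

#!/usr/bin/env python
import rospy
from vision.msg import DetectedObstacle

if __name__ == '__main__':
    rospy.init_node('detected_obstacle_publisher_example')
    pub = rospy.Publisher('/vision/obstacle', DetectedObstacle, queue_size=1)
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        # Report an obstacle seen in the centre of the frame.
        pub.publish(DetectedObstacle(is_left=False, is_center=True, is_right=False, area=1500))
        rate.sleep()

--------------------------------------------------------------------------------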
/src/navigation/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(navigation)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED COMPONENTS
11 | rospy
12 | std_msgs
13 | )
14 |
15 | ## System dependencies are found with CMake's conventions
16 | # find_package(Boost REQUIRED COMPONENTS system)
17 |
18 |
19 | ## Uncomment this if the package has a setup.py. This macro ensures
20 | ## modules and global scripts declared therein get installed
21 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
22 | # catkin_python_setup()
23 |
24 | ################################################
25 | ## Declare ROS messages, services and actions ##
26 | ################################################
27 |
28 | ## To declare and build messages, services or actions from within this
29 | ## package, follow these steps:
30 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
31 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
32 | ## * In the file package.xml:
33 | ## * add a build_depend tag for "message_generation"
34 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
35 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
36 | ## but can be declared for certainty nonetheless:
37 | ## * add a exec_depend tag for "message_runtime"
38 | ## * In this file (CMakeLists.txt):
39 | ## * add "message_generation" and every package in MSG_DEP_SET to
40 | ## find_package(catkin REQUIRED COMPONENTS ...)
41 | ## * add "message_runtime" and every package in MSG_DEP_SET to
42 | ## catkin_package(CATKIN_DEPENDS ...)
43 | ## * uncomment the add_*_files sections below as needed
44 | ## and list every .msg/.srv/.action file to be processed
45 | ## * uncomment the generate_messages entry below
46 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
47 |
48 | ## Generate messages in the 'msg' folder
49 | # add_message_files(
50 | # FILES
51 | # Message1.msg
52 | # Message2.msg
53 | # )
54 |
55 | ## Generate services in the 'srv' folder
56 | # add_service_files(
57 | # FILES
58 | # Service1.srv
59 | # Service2.srv
60 | # )
61 |
62 | ## Generate actions in the 'action' folder
63 | # add_action_files(
64 | # FILES
65 | # Action1.action
66 | # Action2.action
67 | # )
68 |
69 | ## Generate added messages and services with any dependencies listed here
70 | # generate_messages(
71 | # DEPENDENCIES
72 | # std_msgs
73 | # )
74 |
75 | ################################################
76 | ## Declare ROS dynamic reconfigure parameters ##
77 | ################################################
78 |
79 | ## To declare and build dynamic reconfigure parameters within this
80 | ## package, follow these steps:
81 | ## * In the file package.xml:
82 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
83 | ## * In this file (CMakeLists.txt):
84 | ## * add "dynamic_reconfigure" to
85 | ## find_package(catkin REQUIRED COMPONENTS ...)
86 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
87 | ## and list every .cfg file to be processed
88 |
89 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
90 | # generate_dynamic_reconfigure_options(
91 | # cfg/DynReconf1.cfg
92 | # cfg/DynReconf2.cfg
93 | # )
94 |
95 | ###################################
96 | ## catkin specific configuration ##
97 | ###################################
98 | ## The catkin_package macro generates cmake config files for your package
99 | ## Declare things to be passed to dependent projects
100 | ## INCLUDE_DIRS: uncomment this if your package contains header files
101 | ## LIBRARIES: libraries you create in this project that dependent projects also need
102 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
103 | ## DEPENDS: system dependencies of this project that dependent projects also need
104 | catkin_package(
105 | # INCLUDE_DIRS include
106 | # LIBRARIES navigation
107 | # CATKIN_DEPENDS rospy std_msgs
108 | # DEPENDS system_lib
109 | )
110 |
111 | ###########
112 | ## Build ##
113 | ###########
114 |
115 | ## Specify additional locations of header files
116 | ## Your package locations should be listed before other locations
117 | include_directories(
118 | # include
119 | ${catkin_INCLUDE_DIRS}
120 | )
121 |
122 | ## Declare a C++ library
123 | # add_library(${PROJECT_NAME}
124 | # src/${PROJECT_NAME}/navigation.cpp
125 | # )
126 |
127 | ## Add cmake target dependencies of the library
128 | ## as an example, code may need to be generated before libraries
129 | ## either from message generation or dynamic reconfigure
130 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
131 |
132 | ## Declare a C++ executable
133 | ## With catkin_make all packages are built within a single CMake context
134 | ## The recommended prefix ensures that target names across packages don't collide
135 | # add_executable(${PROJECT_NAME}_node src/navigation_node.cpp)
136 |
137 | ## Rename C++ executable without prefix
138 | ## The above recommended prefix causes long target names, the following renames the
139 | ## target back to the shorter version for ease of user use
140 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
141 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
142 |
143 | ## Add cmake target dependencies of the executable
144 | ## same as for the library above
145 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
146 |
147 | ## Specify libraries to link a library or executable target against
148 | # target_link_libraries(${PROJECT_NAME}_node
149 | # ${catkin_LIBRARIES}
150 | # )
151 |
152 | #############
153 | ## Install ##
154 | #############
155 |
156 | # all install targets should use catkin DESTINATION variables
157 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
158 |
159 | ## Mark executable scripts (Python etc.) for installation
160 | ## in contrast to setup.py, you can choose the destination
161 | # install(PROGRAMS
162 | # scripts/my_python_script
163 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
164 | # )
165 |
166 | ## Mark executables and/or libraries for installation
167 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
168 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
169 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
170 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
171 | # )
172 |
173 | ## Mark cpp header files for installation
174 | # install(DIRECTORY include/${PROJECT_NAME}/
175 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
176 | # FILES_MATCHING PATTERN "*.h"
177 | # PATTERN ".svn" EXCLUDE
178 | # )
179 |
180 | ## Mark other files for installation (e.g. launch and bag files, etc.)
181 | # install(FILES
182 | # # myfile1
183 | # # myfile2
184 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
185 | # )
186 |
187 | #############
188 | ## Testing ##
189 | #############
190 |
191 | ## Add gtest based cpp test target and link libraries
192 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_navigation.cpp)
193 | # if(TARGET ${PROJECT_NAME}-test)
194 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
195 | # endif()
196 |
197 | ## Add folders to be run by python nosetests
198 | # catkin_add_nosetests(test)
199 |
--------------------------------------------------------------------------------
/src/navigation/src/walking_module.py:
--------------------------------------------------------------------------------
1 | import json
2 | import math
3 | import os
4 | from time import sleep
5 |
6 | import roslib.packages as rospkg
7 | import rospy
8 | from std_msgs.msg import String
9 |
10 | from op3_walking_module_msgs.msg import WalkingParam
11 |
12 | from op3_module import OP3Module
13 |
14 | class WalkingModule(OP3Module):
15 |
16 | def __init__(self):
17 | super(WalkingModule, self).__init__(module_name='walking_module')
18 |
19 | self.is_initial_walk = False
20 |
21 | # Initialize OP3 ROS publishers
22 | self.set_walking_command_pub = rospy.Publisher('/robotis/walking/command', String, queue_size=0)
23 | self.set_walking_param_pub = rospy.Publisher('/robotis/walking/set_params', WalkingParam, queue_size=0)
24 | self.module_control_pub = rospy.Publisher('/robotis/enable_ctrl_module', String, queue_size=0)
25 |
26 | # Initialize configuration param variables
27 | self.current_walking_param = WalkingParam()
28 | self.initial_param = None
29 | self.marching_param = None
30 | self.forward_param = None
31 | self.turn_left_param = None
32 | self.turn_right_param = None
33 |
34 | # Load configurations
35 | config_file_path = os.path.join(rospkg.get_pkg_dir('navigation'), 'config', 'walking_configurations.json')
36 | self.load_configuration(file_path=config_file_path)
37 |
38 | def enable(self):
39 | self.is_initial_walk = True
40 |
41 | message = String()
42 | message.data = 'walking_module'
43 |
44 | self.module_control_pub.publish(message)
45 |
46 | sleep(2)
47 |
48 | def load_configuration(self, file_path):
49 | rospy.loginfo('Loading walking configurations.')
50 |
51 | with open(file_path, 'r') as file:
52 | configuration = json.load(file)
53 |
54 | # Generic walking parameters
55 | self.current_walking_param.init_x_offset = configuration['init_x_offset']
56 | self.current_walking_param.init_y_offset = configuration['init_y_offset']
57 | self.current_walking_param.init_z_offset = configuration['init_z_offset']
58 | self.current_walking_param.init_roll_offset = configuration['init_roll_offset']
59 | self.current_walking_param.init_pitch_offset = configuration['init_pitch_offset']
60 | self.current_walking_param.init_yaw_offset = configuration['init_yaw_offset']
61 | self.current_walking_param.dsp_ratio = configuration['dsp_ratio']
62 | self.current_walking_param.step_fb_ratio = configuration['step_fb_ratio']
63 | self.current_walking_param.move_aim_on = configuration['move_aim_on']
64 | self.current_walking_param.balance_enable = configuration['balance_enable']
65 | self.current_walking_param.balance_hip_roll_gain = configuration['balance_hip_roll_gain']
66 | self.current_walking_param.balance_knee_gain = configuration['balance_knee_gain']
67 | self.current_walking_param.balance_ankle_roll_gain = configuration['balance_ankle_roll_gain']
68 | self.current_walking_param.balance_ankle_pitch_gain = configuration['balance_ankle_pitch_gain']
69 | self.current_walking_param.y_swap_amplitude = configuration['y_swap_amplitude']
70 | self.current_walking_param.z_swap_amplitude = configuration['z_swap_amplitude']
71 | self.current_walking_param.arm_swing_gain = configuration['arm_swing_gain']
72 | self.current_walking_param.pelvis_offset = configuration['pelvis_offset']
73 | self.current_walking_param.hip_pitch_offset = math.radians(configuration['hip_pitch_offset'])
74 | self.current_walking_param.p_gain = configuration['p_gain']
75 | self.current_walking_param.i_gain = configuration['i_gain']
76 | self.current_walking_param.d_gain = configuration['d_gain']
77 |
78 |         # Initial dx, dy, dz, dtheta, period_time
79 | self.initial_param = {
80 | 'x': configuration['initial']['x_move_amplitude'],
81 | 'y': configuration['initial']['y_move_amplitude'],
82 | 'z': configuration['initial']['z_move_amplitude'],
83 | 'angle': math.radians(configuration['initial']['angle_move_amplitude']),
84 | 'period_time': configuration['initial']['period_time']
85 | }
86 |
87 | # Marching dx, dy, dz, dtheta, period_time
88 | self.marching_param = {
89 | 'x': configuration['marching']['x_move_amplitude'],
90 | 'y': configuration['marching']['y_move_amplitude'],
91 | 'z': configuration['marching']['z_move_amplitude'],
92 | 'angle': math.radians(configuration['marching']['angle_move_amplitude']),
93 | 'period_time': configuration['marching']['period_time']
94 | }
95 |
96 | # Forward dx, dy, dz, dtheta, period_time
97 | self.forward_param = {
98 | 'x': configuration['forward']['x_move_amplitude'],
99 | 'y': configuration['forward']['y_move_amplitude'],
100 | 'z': configuration['forward']['z_move_amplitude'],
101 | 'angle': math.radians(configuration['forward']['angle_move_amplitude']),
102 | 'period_time': configuration['forward']['period_time']
103 | }
104 |
105 | # Turn left dx, dy, dz, dtheta, period_time
106 | self.turn_left_param = {
107 | 'x': configuration['turn_left']['x_move_amplitude'],
108 | 'y': configuration['turn_left']['y_move_amplitude'],
109 | 'z': configuration['turn_left']['z_move_amplitude'],
110 | 'angle': math.radians(configuration['turn_left']['angle_move_amplitude']),
111 | 'period_time': configuration['turn_left']['period_time']
112 | }
113 |
114 | # Turn right dx, dy, dz, dtheta, period_time
115 | self.turn_right_param = {
116 | 'x': configuration['turn_right']['x_move_amplitude'],
117 | 'y': configuration['turn_right']['y_move_amplitude'],
118 | 'z': configuration['turn_right']['z_move_amplitude'],
119 | 'angle': math.radians(configuration['turn_right']['angle_move_amplitude']),
120 | 'period_time': configuration['turn_right']['period_time']
121 | }
122 |
123 | def __update_walking_param(self, walking_param):
124 | self.current_walking_param.x_move_amplitude = walking_param['x']
125 | self.current_walking_param.y_move_amplitude = walking_param['y']
126 | self.current_walking_param.z_move_amplitude = walking_param['z']
127 | self.current_walking_param.angle_move_amplitude = walking_param['angle']
128 | self.current_walking_param.period_time = walking_param['period_time']
129 |
130 | self.set_walking_param_pub.publish(self.current_walking_param)
131 | sleep(1)
132 |
133 | def __send_walking_command(self, command='start'):
134 | message = String()
135 | message.data = command
136 |
137 | self.set_walking_command_pub.publish(message)
138 | sleep(1)
139 |
140 | def __walk(self, param_name, walking_param):
141 | rospy.loginfo('Switching walking gait: {0}'.format(param_name))
142 |
143 | if self.is_initial_walk:
144 | self.__update_walking_param(walking_param=self.initial_param)
145 | self.__send_walking_command(command='start')
146 | self.is_initial_walk = False
147 | sleep(2)
148 |
149 | self.__update_walking_param(walking_param=walking_param)
150 |
151 | def march(self):
152 | self.__walk(param_name='marching', walking_param=self.marching_param)
153 |
154 | def forward(self):
155 | self.__walk(param_name='forward', walking_param=self.forward_param)
156 |
157 | def turn_left(self):
158 | self.__walk(param_name='turn_left', walking_param=self.turn_left_param)
159 |
160 | def turn_right(self):
161 | self.__walk(param_name='turn_right', walking_param=self.turn_right_param)
162 |
163 | def stop(self):
164 | rospy.loginfo('Stopping walk.')
165 | self.__send_walking_command(command='stop')
166 |
167 |
168 |
169 |
--------------------------------------------------------------------------------
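Note: a minimal usage sketch for WalkingModule, mirroring the call order in navigation_node.py. It assumes it is run from the navigation package's src directory (so the plain import resolves) and that an op3_manager is already running on the robot side; the node name is illustrative.

#!/usr/bin/env python
import rospy
from walking_module import WalkingModule

if __name__ == '__main__':
    rospy.init_node('walking_module_example')
    walking = WalkingModule()   # loads config/walking_configurations.json
    walking.enable()            # switch the OP3 controller to the walking module
    walking.forward()           # first call also sends the initial gait and 'start'
    rospy.sleep(5.0)            # keep walking forward for a few seconds
    walking.stop()              # send the 'stop' walking command

--------------------------------------------------------------------------------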
/src/vision/src/vision_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from collections import OrderedDict
4 | import os
5 |
6 | import cv2 as cv
7 | import rospy
8 | import roslib.packages as rospkg
9 | import numpy as np
10 |
11 | from cv_mat import CVMat
12 | from field_object import Field, Lines, Obstacle
13 | from line_segment_detection import LineSegmentDetector
14 | from obstacle_detection import ObstacleDetector
15 | import opencv_gui as gui
16 | from configuration import Configuration
17 |
18 |
19 | def get_region_colour_space_values_cb(event, x, y, _frame):
20 | global colour_space_roi, configuration, selected_object
21 |
22 | # Retrieve average values for colour space
23 | if event == cv.EVENT_LBUTTONDOWN:
24 | colour_space_roi = (x, y)
25 | elif event == cv.EVENT_LBUTTONUP:
26 | roi_mean = cv.mean(_frame[colour_space_roi[1]:y, colour_space_roi[0]:x])
27 | selected_object.set_colour_space_value(roi_mean[:-1])
28 | configuration.update(selected_object.name, selected_object.export_configuration())
29 | colour_space_roi = None
30 |
31 |
32 | def set_colour_space_threshold_cb(threshold):
33 | global selected_object, configuration
34 |
35 | # Change colour space threshold for selected object
36 | selected_object.set_colour_space_threshold(threshold=threshold)
37 | configuration.update(selected_object.name, selected_object.export_configuration())
38 |
39 |
40 | def switch_selected_obstacle_cb(value):
41 | global selected_object, field_object_list
42 |
43 | # Switch the tuning object
44 | selected_object = field_object_list.values()[value]
45 | colour_space_threshold = selected_object.threshold
46 | gui.set_colour_space_threshold_trackbar_position(colour_space_threshold)
47 |
48 |
49 | def track_lines(lines, lines_mat, field_frame=None):
50 | global field_object_list, output, selected_object
51 |
52 | lines_mat.extract_lines(thresh_lb=lines.lower_bound, thresh_ub=lines.upper_bound, field=field_frame)
53 |
54 | # Find the lines and corners, classify each line
55 | lines_mat.lsd(max_distance_apart=lines.max_distance_apart, min_length=lines.min_length)
56 | lines_mat.find_corners(max_distance_apart=lines.corner_max_distance_apart)
57 | field_lines = lines_mat.classify_lines()
58 |
59 | # Draw lines if found
60 | if field_lines:
61 | for line_type in field_lines.keys():
62 | if line_type == 'boundary':
63 | colour = lines.output_boundary_colour
64 | elif line_type == 'goal_area':
65 | colour = lines.output_goal_area_colour
66 | elif line_type == 'center':
67 | colour = lines.output_center_colour
68 | else:
69 | colour = lines.output_undefined_colour
70 |
71 | for line_set in field_lines[line_type][0]:
72 | for line in line_set[0]:
73 | pt1, pt2 = line.to_cv_line()
74 | cv.line(output.frame, pt1=pt1, pt2=pt2, color=colour, thickness=3)
75 |
76 | # Publish lines found
77 | lines.publish_msg(field_lines if field_lines else [])
78 |
79 | # Check if the obstacle is selected, show in debug if selected
80 | if selected_object == field_object_list['lines']:
81 | gui.show_debug_window(lines_mat.frame, lines.name)
82 |
83 |
84 | def track_obstacle(obstacle, obstacle_mat):
85 | global field_object_list, output, selected_object
86 |
87 | # Extract obstacle from frame
88 | obstacle_mat.extract_obstacle(thresh_lb=obstacle.lower_bound, thresh_ub=obstacle.upper_bound)
89 |
90 | # Find the obstacle using bounding rectangle, draw any if found
91 | positions, position_text, area = obstacle_mat.bounding_rect(min_area=obstacle.min_area, max_area=obstacle.max_area,
92 | frame_height=FRAME_HEIGHT, frame_width=FRAME_WIDTH,
93 | flush_width=flush_width)
94 |
95 | # Draw rectangle and publish ROS message
96 | if positions is not None:
97 | for tl_pt, br_pt in positions:
98 | cv.rectangle(output.frame, tl_pt, br_pt, obstacle.output_colour, thickness=2)
99 | obstacle.publish_msg(position_text, area)
100 |
101 | # Check if the obstacle is selected, show in debug if selected
102 | if selected_object == field_object_list[obstacle.name]:
103 | gui.show_debug_window(obstacle_mat.frame, obstacle.name)
104 |
105 |
106 | if __name__ == '__main__':
107 | # Load configurations
108 | configuration = Configuration(configuration_directory=os.path.join(rospkg.get_pkg_dir('vision'), 'config'))
109 | configuration.load()
110 | colour_space_roi = None
111 |
112 | # Initialize ROS
113 | rospy.init_node('vision_node')
114 | rate = rospy.Rate(10)
115 |
116 | # Initialize obstacles/lines and load configurations
117 | field = Field(configuration=configuration.config['field'])
118 | lines = Lines(configuration=configuration.config['lines'])
119 | obstacle = Obstacle(configuration=configuration.config['obstacle'])
120 | field_object_list = OrderedDict([('field', field), ('lines', lines), ('obstacle', obstacle)])
121 | selected_object = field
122 |
123 | # Create GUI and set callbacks
124 | gui.set_mouse_cb('camera', lambda event, x, y, flags, params:
125 | get_region_colour_space_values_cb(event, x, y, original.colour_space_frame))
126 | gui.create_trackbar(gui.DEBUG_WINDOW_NAME, trackbar_name='Obstacles', default_value=0,
127 | max_value=len(field_object_list.values()) - 1, callback=switch_selected_obstacle_cb)
128 | gui.create_colour_space_threshold_trackbar(selected_object.threshold, set_colour_space_threshold_cb)
129 |
130 | # Open camera device
131 | cap = cv.VideoCapture(configuration.config['camera_index'])
132 | original_height, original_width = cap.read()[1].shape[:2]
133 | FRAME_HEIGHT = configuration.config['resized_frame_height']
134 | FRAME_WIDTH = int(round(FRAME_HEIGHT * (original_width / float(original_height))))
135 | del original_height, original_width
136 |
137 | # Create flush mask
138 | flush_width = int(round(FRAME_WIDTH * configuration.config['flush_ratio'] / 2))
139 | flush_mask = np.ones((FRAME_HEIGHT, FRAME_WIDTH, 1), dtype=np.uint8) * 255
140 | flush_mask = cv.rectangle(flush_mask, pt1=(0, 0), pt2=(flush_width, FRAME_HEIGHT), color=(0,0,0), thickness=-1)
141 | flush_mask = cv.rectangle(flush_mask, pt1=(FRAME_WIDTH - flush_width, 0), pt2=(FRAME_WIDTH, FRAME_HEIGHT), color=(0,0,0), thickness=-1)
142 |
143 | try:
144 | while not rospy.is_shutdown():
145 | ret_code, raw_image = cap.read()
146 |
147 |             if ret_code and raw_image is not None:  # guard against failed capture reads
148 | original = CVMat(raw_image, height=FRAME_HEIGHT)
149 |
150 | # Create an output frame from a clone of the original
151 | output = original.clone()
152 |
153 | # Extract background from soccer field
154 | field_mat = original.clone()
155 | field_mat.background_mask(thresh_lb=field.lower_bound, thresh_ub=field.upper_bound,
156 | min_area=field.min_area, line_width=lines.max_width)
157 | if selected_object == field_object_list['field']:
158 | gui.show_debug_window(field_mat.frame, field.name)
159 | # output.frame = cv.bitwise_and(output.frame, output.frame, mask=field_mat.frame)
160 |
161 | # Extract lines from soccer field and detect lines using LSD
162 | lines_mat = LineSegmentDetector(frame=original.frame)
163 | track_lines(lines=lines, lines_mat=lines_mat, field_frame=field_mat.frame)
164 |
165 | # Extract obstacles from field
166 | # flush_frame = cv.bitwise_and(original.frame, original.frame, mask=flush_mask)
167 | obstacle_mat = ObstacleDetector(frame=original.frame)
168 | track_obstacle(obstacle=obstacle, obstacle_mat=obstacle_mat)
169 |
170 | # Display GUI windows
171 | gui.show('camera', original.frame)
172 | gui.show('output', output.frame)
173 |
174 | cv.waitKey(1)
175 | except rospy.ROSInterruptException:
176 | # Clean up
177 | gui.teardown()
178 | cap.release()
179 |
--------------------------------------------------------------------------------
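Note: in the node above, dragging a rectangle on the 'camera' window averages the colour-space values over that region and stores them as the selected object's reference colour. The sketch below (roi_mean_colour is an illustrative helper, not part of vision_node.py) isolates that averaging step.

import numpy as np
import cv2 as cv

def roi_mean_colour(frame, press_xy, release_xy):
    (x0, y0), (x1, y1) = press_xy, release_xy
    # cv.mean returns a 4-tuple of per-channel means; the last element is
    # dropped, as in get_region_colour_space_values_cb.
    return cv.mean(frame[y0:y1, x0:x1])[:-1]

# Synthetic uniform frame: the ROI mean recovers the channel values exactly.
frame = np.full((120, 160, 3), (30, 90, 200), dtype=np.uint8)
print(roi_mean_colour(frame, press_xy=(10, 10), release_xy=(50, 40)))  # (30.0, 90.0, 200.0)

--------------------------------------------------------------------------------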
/src/navigation/src/navigation_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from enum import Enum
4 | import os
5 | from time import sleep, time
6 |
7 | import rosnode
8 | import rospy
9 | import roslib.packages as rospkg
10 | from std_msgs.msg import Bool, String
11 |
12 | from head_control_module import HeadControlModule
13 | from walking_module import WalkingModule
14 | from obstacle import Obstacle
15 | from robot import Robot, RobotMovement
16 | from slam.srv import ToggleLocalization
17 | from vision.msg import DetectedObstacle, DetectedLines
18 | from slam.msg import RobotPosition
19 |
20 |
21 | class RobotMode(Enum):
22 | READY = 0
23 | RUNNING = 1
24 |
25 |
26 | def button_callback(msg):
27 | global robot_mode, mode_change
28 |
29 | if msg.data == 'start':
30 | # Start button pressed
31 | # Send robot to RUNNING mode
32 | if robot_mode == RobotMode.READY:
33 | robot_mode = RobotMode.RUNNING
34 | mode_change = True
35 | elif robot_mode == RobotMode.RUNNING:
36 | rospy.logerr('Robot already in running mode.')
37 |
38 | elif msg.data == 'mode':
39 | # Mode button pressed
40 | # Send robot to READY mode
41 | robot_mode = RobotMode.READY
42 | mode_change = True
43 |
44 |
45 | def wait_for_node(node_name):
46 | # Wait for node to start
47 | while node_name not in rosnode.get_node_names():
48 | rospy.logwarn('Node is not running: {0}'.format(node_name))
49 | sleep(1)
50 |
51 | rospy.loginfo('Node is running: {0}'.format(node_name))
52 |
53 |
54 | def memorize_lines(msg):
55 | global lines, look_at_lines
56 |
57 | if look_at_lines:
58 | if 'T' in msg.boundary_line and 'L' in msg.goal_line:
59 | lines += ['TL']
60 | elif 'L' in msg.goal_line:
61 | lines += ['gL']
62 | elif 'center' in msg.center_line:
63 | lines += ['center']
64 | else:
65 | lines += ['']
66 |
67 |
68 | def split_list(alist, wanted_parts=1):
69 | length = len(alist)
70 | return [alist[i*length // wanted_parts: (i+1)*length // wanted_parts] for i in range(wanted_parts)]
71 |
72 |
73 | def avoid_obstacles():
74 | global mode_change, walking_module, obstacle, rate, toggle_localization_srv, walking_rates
75 |
76 | previous_movement = None
77 |
78 | while not rospy.is_shutdown() and not mode_change:
79 | # Get next movement of robot based on obstacle and map
80 | movement = robot.next_movement(avoid_obstacle=obstacle.is_center, num_map_rows=NUM_MAP_ROWS,
81 | num_map_columns=NUM_MAP_COLUMNS)
82 |
83 | if movement == RobotMovement.OBSTACLE:
84 | # Avoid obstacle right in front of robot with localization turned off
85 | rospy.loginfo('[Navigation] avoid obstacle')
86 |
87 | toggle_localization_srv(pause=True, obstacle=True, start=False, turn_left=False, turn_right=False, place_left=False, place_right=False, place_center=False)
88 | walking_module.turn_right()
89 | sleep(robot.turn_right_rate / 4.0)
90 | walking_module.forward()
91 | sleep(robot.forward_rate * 8)
92 | walking_module.turn_left()
93 | sleep(robot.turn_left_rate / 4.0)
94 | toggle_localization_srv(pause=False)
95 |
96 | elif movement == RobotMovement.FORWARD:
97 | # Continue forward
98 | rospy.loginfo('[Navigation] forward')
99 |
100 | if previous_movement != RobotMovement.FORWARD:
101 | walking_module.forward()
102 |
103 | elif movement == RobotMovement.TURN_LEFT:
104 | # Turn left
105 | rospy.loginfo('[Navigation] turn left')
106 |
107 | toggle_localization_srv(pause=True, turn_left=True, start=False, obstacle=False, turn_right=False, place_left=False, place_right=False, place_center=False)
108 | walking_module.turn_left()
109 | sleep(robot.turn_left_rate)
110 | toggle_localization_srv(pause=False)
111 |
112 | elif movement == RobotMovement.TURN_RIGHT:
113 | # Turn right
114 | rospy.loginfo('[Navigation] turn right')
115 |
116 | toggle_localization_srv(pause=True, turn_right=True, start=False, turn_left=False, obstacle=False, place_left=False, place_right=False, place_center=False)
117 | walking_module.turn_right()
118 | sleep(robot.turn_right_rate)
119 | toggle_localization_srv(pause=False)
120 |
121 | previous_movement = movement
122 |
123 | rate.sleep()
124 |
125 |
126 | if __name__ == '__main__':
127 |
128 |     # Initialize global variables
129 | robot_mode = RobotMode.READY
130 | mode_change = False
131 | robot = Robot(rate_config_path=os.path.join(rospkg.get_pkg_dir('navigation'), 'config', 'walking_rate.json'))
132 | obstacle = Obstacle()
133 | SPIN_RATE = 0.5
134 | NUM_MAP_ROWS, NUM_MAP_COLUMNS = 19, 28
135 | look_at_lines = False
136 | lines = []
137 |
138 | # Wait for other nodes to launch
139 | wait_for_node('/op3_manager')
140 | wait_for_node('/vision_node')
141 | wait_for_node('/slam_node')
142 |
143 |     # Initialize ROS node
144 | rospy.init_node('navigation_node')
145 | rate = rospy.Rate(SPIN_RATE)
146 |
147 |     # Set up SLAM service proxy
148 | toggle_localization_srv = rospy.ServiceProxy('/slam/toggle_localization', ToggleLocalization)
149 |
150 | # Initialize OP3 subscriber
151 | button_sub = rospy.Subscriber('/robotis/open_cr/button', String, button_callback)
152 |
153 |     # Initialize vision subscribers
154 | line_vision_sub = rospy.Subscriber('/vision/lines', DetectedLines, memorize_lines)
155 | obstacle_vision_sub = rospy.Subscriber('/vision/obstacle', DetectedObstacle, obstacle.update)
156 |
157 | # Initialize SLAM subscriber
158 | robot_position_sub = rospy.Subscriber('/slam/robot_position', RobotPosition, robot.update_pose)
159 |
160 | # Initializing modules
161 | head_control_module = HeadControlModule()
162 | walking_module = WalkingModule()
163 |
164 |     # Wait for the OP3 modules to come up before prompting the user
165 | sleep(10)
166 | rospy.loginfo('Press start button to begin.')
167 |
168 | while not rospy.is_shutdown():
169 | if mode_change:
170 | mode_change = False
171 |
172 | if robot_mode == RobotMode.READY:
173 | rospy.loginfo('Resetting OP3. Press start button to begin.')
174 |
175 | walking_module.enable()
176 | walking_module.stop()
177 | head_control_module.enable()
178 | head_control_module.look_down()
179 |
180 | elif robot_mode == RobotMode.RUNNING:
181 |                 rospy.loginfo('Starting to navigate the soccer field.')
182 |
183 | walking_module.enable()
184 | head_control_module.enable()
185 |
186 | # Try to find starting cell
187 | head_control_module.look_left()
188 | sleep(1.5)
189 | look_at_lines = True
190 | head_control_module.look_right()
191 | sleep(2.5)
192 | look_at_lines = False
193 |
194 | left_lines, center_lines, right_lines = split_list(lines, wanted_parts=3)
195 | lines = [max(left_lines, key=left_lines.count),
196 | max(center_lines, key=center_lines.count),
197 | max(right_lines, key=right_lines.count)]
198 |
199 | if lines[0] == 'TL':
200 | toggle_localization_srv(place_left=True, start=False, pause=True, obstacle=False, turn_left=False, turn_right=False, place_center=False, place_right=False)
201 | elif lines[2] == 'TL':
202 | toggle_localization_srv(place_right=True, start=False, pause=True, obstacle=False, turn_left=False, turn_right=False, place_left=False, place_center=False)
203 | else:
204 | toggle_localization_srv(place_center=True, start=False, pause=True, obstacle=False, turn_left=False, turn_right=False, place_left=False, place_right=False)
205 |
206 | walking_module.enable()
207 | walking_module.march()
208 | head_control_module.enable()
209 | head_control_module.look_down()
210 | sleep(3)
211 |
212 | toggle_localization_srv(start=True)
213 | avoid_obstacles()
214 |
215 | rate.sleep()
216 |
--------------------------------------------------------------------------------
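Note: the starting-cell guess in the node above buffers line labels while the head sweeps from left to right, splits the buffer into three equal parts with split_list, and keeps the most frequent label in each part. A worked example with made-up labels:

def split_list(alist, wanted_parts=1):
    length = len(alist)
    return [alist[i * length // wanted_parts:(i + 1) * length // wanted_parts]
            for i in range(wanted_parts)]

lines = ['TL', 'TL', '', 'center', 'center', 'center', '', 'gL', 'gL']
left, center, right = split_list(lines, wanted_parts=3)
print([max(part, key=part.count) for part in (left, center, right)])
# ['TL', 'center', 'gL'] -> the 'TL' vote on the left places the robot on the left starting cell.

--------------------------------------------------------------------------------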
/src/slam/src/particles.py:
--------------------------------------------------------------------------------
1 | import math
2 | import sys
3 | from copy import deepcopy
4 |
5 | import numpy as np
6 | import rospy
7 |
8 | class Particle(object):
9 |
10 | def __init__(self, map_rows, map_columns, row, column, angle, probability):
11 | self.map_rows = map_rows
12 | self.map_columns = map_columns
13 |
14 | self.row = row
15 | self.column = column
16 | self.angle = angle
17 | self.probability = probability
18 | self.valid = True
19 |
20 | self.randomize(row=row, column=column, angle=angle, probability=probability)
21 |
22 | def randomize(self, row, column, angle, probability):
23 |         # Generate a particle near the believed row and column that lies within the map
24 | new_row, new_column = self.generate_normal(row=row, column=column, angle=angle, probability=probability)
25 | while not self.in_map(row=new_row, column=new_column):
26 | new_row, new_column = self.generate_normal(row=row, column=column, angle=angle, probability=probability)
27 |
28 | # Assign values
29 | self.row = new_row
30 | self.column = new_column
31 | self.angle = angle
32 | self.probability = probability
33 | self.valid = True
34 |
35 | def generate_normal(self, row, column, angle, probability):
36 | row_scale = 0.6 - probability
37 | column_scale = 0.6 - probability
38 |
39 | if angle == 0 or angle == 2:
40 | row_scale /= 2.0
41 |         elif angle == 1 or angle == 3:
42 | column_scale /= 2.0
43 |
44 | return int(round(np.random.normal(row, scale=row_scale if row_scale > 0 else 0.01))), \
45 | int(round(np.random.normal(column, scale=column_scale if column_scale > 0 else 0.01)))
46 |
47 | def move(self, estimated_num_cells):
48 | row = self.row
49 | column = self.column
50 |
51 | # Move particle for estimated steps in angle direction
52 | if self.angle == 0:
53 | column += estimated_num_cells
54 | elif self.angle == 1:
55 | row -= estimated_num_cells
56 | elif self.angle == 2:
57 | column -= estimated_num_cells
58 | elif self.angle == 3:
59 | row += estimated_num_cells
60 |
61 | # Check whether particle is still within map
62 | if self.in_map(row=row, column=column):
63 | self.row = row
64 | self.column = column
65 | else:
66 | self.probability = 0.0
67 | self.valid = False
68 |
69 | def in_map(self, row, column):
70 | return 0 <= row < self.map_rows and 0 <= column < self.map_columns
71 |
72 |
73 | class Particles(object):
74 |
75 | def __init__(self, row, column, angle, map_rows, map_columns, num_particles=5):
76 | self.particles = [Particle(map_rows=map_rows, map_columns=map_columns, row=row, column=column,
77 | angle=angle, probability=1.0 / num_particles)
78 | for _ in range(num_particles)]
79 |
80 | def even_probability(self):
81 | return 1.0 / len(self.particles)
82 |
83 | def randomize(self, row, column, angle):
84 | probability = self.even_probability()
85 | for particle in self.particles:
86 | particle.randomize(row=row, column=column, angle=angle, probability=probability)
87 |
88 | def random_sample(self, row, column, estimated_num_cells, probability_map):
89 | best_candidate = None
90 |
91 | # Best candidate will be on highest probability of probability map
92 | if probability_map is not None:
93 | best_region = None
94 | shortest_distance = sys.maxsize
95 | for region_row, region_column in probability_map.regions:
96 | distance = math.sqrt(math.fabs(region_row - row)**2 + math.fabs(region_column - column)**2)
97 | if distance < shortest_distance:
98 | best_region = (region_row, region_column)
99 | shortest_distance = distance
100 | if best_region is not None:
101 | best_candidate = self.particles[0]
102 | best_candidate.row, best_candidate.column = best_region
103 | best_candidate.probability = probability_map.map[best_candidate.row][best_candidate.column]
104 | best_candidate.valid = True
105 |
106 | # Pick the best candidate based on distance to the previous best candidate
107 | if best_candidate is None:
108 | shortest_distance = sys.maxsize
109 | best_particle = None
110 |
111 | for particle in self.particles:
112 | distance = math.sqrt(math.fabs(particle.row - row)**2 + math.fabs(particle.column - column)**2)
113 | if distance < shortest_distance:
114 | shortest_distance = distance
115 | best_particle = particle
116 | elif (distance == shortest_distance) and \
117 | ((particle.angle == 0 and particle.column - column > best_particle.column - column) or \
118 | (particle.angle == 1 and particle.row - row < best_particle.row - row) or \
119 | (particle.angle == 2 and particle.column - column < best_particle.column - column) or \
120 | (particle.angle == 3 and particle.row - row > best_particle.row - row)):
121 | best_particle = particle
122 |
123 | best_candidate = best_particle
124 |
125 | # Trim number of particles based on best candidate probability
126 | if best_candidate.probability > self.even_probability() and len(self.particles) > 1:
127 | self.particles.pop(0)
128 |
129 |
130 | # if best_candidate.probability > self.even_probability():
131 | # # Trim number of particles based on best candidate probability
132 | # if len(self.particles) > 1:
133 | # self.particles.pop(0)
134 | # else:
135 | # # Pick the best candidate based on distance to the previous best candidate
136 | # shortest_distance = sys.maxsize
137 | # best_particle = None
138 |
139 | # for particle in self.particles:
140 | # distance = math.sqrt(math.fabs(particle.row - row)**2 + math.fabs(particle.column - column)**2)
141 | # if distance < shortest_distance:
142 | # shortest_distance = distance
143 | # best_particle = particle
144 | # elif (distance == shortest_distance) and \
145 | # ((particle.angle == 0 and particle.column - column > best_particle.column - column) or \
146 | # (particle.angle == 1 and particle.row - row < best_particle.row - row) or \
147 | # (particle.angle == 2 and particle.column - column < best_particle.column - column) or \
148 | # (particle.angle == 3 and particle.row - row > best_particle.row - row)):
149 | # best_particle = particle
150 |
151 | # best_candidate = best_particle
152 |
153 | for particle in self.particles:
154 | # Randomize based on best candidate origin
155 | # Scale of randomization is based on probability of best candidate
156 | particle.randomize(row=best_candidate.row, column=best_candidate.column,
157 | angle=best_candidate.angle, probability=best_candidate.probability)
158 |
159 | # Move each particle for the estimated number of cells in angle direction
160 | particle.move(estimated_num_cells=estimated_num_cells)
161 |
162 | # Update probability based on probability map
163 | if particle.valid and probability_map is not None:
164 | particle.probability = probability_map.map[int(particle.row)][int(particle.column)]
165 | else:
166 | particle.probability = 0.0
167 |
168 | # Normalize probabilities of particles
169 | total_probability = sum(particle.probability for particle in self.particles)
170 | if total_probability > 0.0:
171 | for particle in self.particles:
172 | particle.probability = particle.probability / float(total_probability)
173 | else:
174 | for particle in self.particles:
175 | particle.probability = self.even_probability()
176 |
177 | return best_candidate
178 |
--------------------------------------------------------------------------------
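Note: each resampling pass scatters the particles around the best candidate with a spread that shrinks as that candidate's probability grows. The sketch below (scatter is an illustrative helper, not part of particles.py) reproduces the scale logic of Particle.generate_normal.

import numpy as np

def scatter(row, column, angle, probability):
    # Standard deviation is (0.6 - probability), halved along the axis the
    # robot is facing, with a small positive floor.
    row_scale = column_scale = 0.6 - probability
    if angle in (0, 2):        # facing +/- column: rows vary less
        row_scale /= 2.0
    elif angle in (1, 3):      # facing +/- row: columns vary less
        column_scale /= 2.0
    return (int(round(np.random.normal(row, scale=row_scale if row_scale > 0 else 0.01))),
            int(round(np.random.normal(column, scale=column_scale if column_scale > 0 else 0.01))))

np.random.seed(0)
# A fairly confident candidate (probability 0.5) barely moves the particles.
print([scatter(row=9, column=14, angle=1, probability=0.5) for _ in range(5)])

--------------------------------------------------------------------------------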
/src/slam/src/slam_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import json
4 | import math
5 | import os
6 | from time import sleep, time
7 |
8 | import cv2 as cv
9 | import roslib.packages as rospkg
10 | import rosnode
11 | import rospy
12 | import numpy as np
13 |
14 | from map_components import Robot, Obstacles, Particle
15 | from particles import Particles
16 | from probability_map import CENTER_LINE_FEATURE_PROBABILITY_MAP, BOUNDARY_T_FEATURE_PROBABILITY_MAP, \
17 | BOUNDARY_L_FEATURE_PROBABILITY_MAP, GOAL_L_FEATURE_PROBABILITY_MAP, \
18 | BOUNDARY_L_GOAL_L_FEATURE_PROBABILITY_MAP, PARALLEL_FEATURE_PROBABILITY_MAP, \
19 | BOUNDARY_T_GOAL_L_FEATURE_PROBABILITY_MAP
20 | from vision.msg import DetectedObstacle, DetectedLines
21 | from slam.srv import ToggleLocalization
22 | from slam.msg import RobotPosition
23 |
24 |
25 | def set_init_robot_position(event, x, y, flags, param):
26 | global robot
27 |
28 | if event == cv.EVENT_LBUTTONDOWN:
29 | robot.set_init_position(x=x, y=y)
30 |
31 |
32 | def toggle_localization(request):
33 | global pause_localization, start_localization, movement_avoid_obstacle, movement_turn_left, movement_turn_right
34 |
35 | pause_localization = request.pause
36 | start_localization = request.start
37 |
38 | if request.place_left:
39 | robot.row = 18
40 | robot.column = 8
41 | elif request.place_center:
42 | robot.row = 18
43 | robot.column = 13
44 | elif request.place_right:
45 | robot.row = 18
46 | robot.column = 19
47 |
48 | movement_avoid_obstacle = request.obstacle
49 | movement_turn_left = request.turn_left
50 | movement_turn_right = request.turn_right
51 |
52 | return True
53 |
54 |
55 | def update_probability_map(msg):
56 | global robot, probability_map
57 |
58 | map_idx = str(int(round(math.degrees(int(robot.angle) * math.pi/2))))
59 |
60 | if 'T' in msg.boundary_line and 'L' in msg.goal_line:
61 | probability_map = BOUNDARY_T_GOAL_L_FEATURE_PROBABILITY_MAP[map_idx]
62 | elif 'L' in msg.boundary_line and 'L' in msg.goal_line:
63 | probability_map = BOUNDARY_L_GOAL_L_FEATURE_PROBABILITY_MAP[map_idx]
64 | elif 'parallel' in msg.boundary_line or 'parallel' in msg.goal_line:
65 | probability_map = PARALLEL_FEATURE_PROBABILITY_MAP[map_idx]
66 | elif 'T' in msg.boundary_line:
67 | probability_map = BOUNDARY_T_FEATURE_PROBABILITY_MAP[map_idx]
68 | elif 'L' in msg.boundary_line:
69 | probability_map = BOUNDARY_L_FEATURE_PROBABILITY_MAP[map_idx]
70 | elif 'L' in msg.goal_line:
71 | probability_map = GOAL_L_FEATURE_PROBABILITY_MAP[map_idx]
72 | elif 'center' in msg.center_line:
73 | probability_map = CENTER_LINE_FEATURE_PROBABILITY_MAP[map_idx]
74 | else:
75 | probability_map = None
76 |
77 |
78 | def add_obstacle_to_map(msg):
79 | global obstacles, map_obstacle, map_obstacle_counter
80 |
81 | if map_obstacle:
82 | # Erase obstacles in front of robot
83 | # obstacles.update_obstacle_list(row=robot.row, column=robot.column, angle=robot.angle)
84 |
85 | # Add obstacle based on vision
86 | if (msg.is_left or msg.is_center or msg.is_right) and map_obstacle_counter == 0:
87 | obstacles.add_obstacle(row=robot.row, column=robot.column, angle=robot.angle,
88 | is_left=msg.is_left, is_center=msg.is_center, is_right=msg.is_right)
89 | map_obstacle_counter = 5
90 |
91 |
92 | def draw_grid(field, grid_size, colour):
93 | field_height, field_width, _ = field.shape
94 |
95 |     for x in range(field_width / grid_size):
96 |         cv.line(field, pt1=(grid_size * (x + 1), 0),
97 |                 pt2=(grid_size * (x + 1), field_height), color=colour)
98 |     for y in range(field_height / grid_size):
99 |         cv.line(field, pt1=(0, grid_size * (y + 1)),
100 |                 pt2=(field_width, grid_size * (y + 1)), color=colour)
101 |
102 | return field
103 |
104 |
105 | def draw_field(field, robot=None, particles=None, obstacles=None):
106 | if particles is not None:
107 | for particle in particles.particles:
108 | field = Particle(row=particle.row, column=particle.column, angle=particle.angle).draw(field)
109 |
110 | if obstacles is not None:
111 | field = obstacles.draw(field)
112 |
113 | if robot is not None:
114 | field = robot.draw(field)
115 |
116 | return field
117 |
118 |
119 | def localize():
120 | global robot, particles, t0, WALK_FORWARD_RATE, robot_position_pub
121 | cells_per_second = 1 / (WALK_FORWARD_RATE / 0.65)
122 | estimated_num_cells = 0.0
123 |
124 | # Wait until robot walks at least 1 cell
125 | while estimated_num_cells < 1 and not rospy.is_shutdown():
126 | time_delta = time() - t0
127 | t0 = time()
128 | estimated_num_cells += time_delta * cells_per_second
129 | estimated_num_cells = int(round(estimated_num_cells))
130 |
131 | # Randomly generate particles and move based on probability
132 | best_particle = particles.random_sample(row=robot.row, column=robot.column,
133 | estimated_num_cells=estimated_num_cells,
134 | probability_map=probability_map)
135 |
136 | # Choose best particle and move robot accordingly
137 | rospy.loginfo('[SLAM] particle: prob - {0:0.2f} particles - {1:2d}' \
138 | .format(best_particle.probability, len(particles.particles)))
139 | robot.update_position(row=best_particle.row, column=best_particle.column, angle=best_particle.angle)
140 |
141 | # Update navigation with robot pose
142 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
143 |
144 |
145 | if __name__ == '__main__':
146 | # Load configurations
147 | with open(os.path.join(rospkg.get_pkg_dir('navigation'), 'config', 'walking_rate.json')) as config:
148 | walking_rates = json.load(config)
149 | WALK_FORWARD_RATE = walking_rates['forward']
150 | TURN_LEFT_RATE = walking_rates['turn_left']
151 | TURN_RIGHT_RATE = walking_rates['turn_right']
152 |
153 | # Declare constants
154 | GRID_SIZE = 30
155 | GRID_COLOUR = (128, 128, 128)
156 | NUM_MAP_ROWS, NUM_MAP_COLUMNS = 19, 28
157 |
158 |     # Initialize global variables
159 | robot = Robot(angle=1)
160 | obstacles = Obstacles(num_map_rows=NUM_MAP_ROWS, num_map_columns=NUM_MAP_COLUMNS)
161 | particles = None
162 | probability_map = None
163 |
164 | # Localization control variables
165 | start_localization = False
166 | pause_localization = True
167 | started_once = False
168 | movement_avoid_obstacle = False
169 | movement_turn_left = False
170 | movement_turn_right = False
171 | map_obstacle = True
172 | map_obstacle_counter = 0
173 |
174 |     # Initialize ROS node
175 | SPIN_RATE = 10
176 | rospy.init_node('slam_node')
177 | rate = rospy.Rate(SPIN_RATE)
178 |
179 | # SLAM services and publishers
180 | toggle_localization_srv = rospy.Service('/slam/toggle_localization', ToggleLocalization, toggle_localization)
181 | robot_position_pub = rospy.Publisher('/slam/robot_position', RobotPosition, queue_size=1)
182 |
183 | # Vision subscribers
184 | vision_line_sub = rospy.Subscriber('/vision/lines', DetectedLines, update_probability_map)
185 | vision_obstacle_sub = rospy.Subscriber('/vision/obstacle', DetectedObstacle, add_obstacle_to_map)
186 |
187 | # Load map
188 | FIELD = cv.imread(os.path.join(rospkg.get_pkg_dir('slam'), 'static', 'soccer_field.png'), cv.IMREAD_UNCHANGED)
189 | FIELD = draw_grid(FIELD, grid_size=GRID_SIZE, colour=GRID_COLOUR)
190 |
191 | # Create map window and callbacks
192 | cv.namedWindow('map')
193 | cv.setMouseCallback('map', on_mouse=set_init_robot_position)
194 | cv.imshow('map', FIELD)
195 |
196 | while not rospy.is_shutdown():
197 |
198 | if map_obstacle_counter > 0:
199 | map_obstacle_counter -= 1
200 |
201 | cv.imshow('map', draw_field(field=FIELD.copy(), robot=robot, particles=particles, obstacles=obstacles))
202 | user_input = cv.waitKey(1)
203 |
204 | if start_localization:
205 | cv.setMouseCallback('map', on_mouse=lambda event, x, y, flags, param: None)
206 |
207 | # Create particles at the current robot pose
208 | particles = Particles(row=robot.row, column=robot.column, angle=robot.angle,
209 | map_rows=NUM_MAP_ROWS, map_columns=NUM_MAP_COLUMNS)
210 |
211 | rospy.loginfo('[SLAM] start localization')
212 | t0 = time()
213 | start_localization = False
214 | pause_localization = False
215 | started_once = True
216 |
217 | elif not pause_localization:
218 | localize()
219 |
220 | elif pause_localization and started_once:
221 | # Pause localization and update with "actions"
222 |
223 | if movement_avoid_obstacle:
224 | # Turn right and navigate around obstacle
225 | map_obstacle = False
226 | movement_avoid_obstacle = False
227 |
228 | cv.imshow('map', draw_field(field=robot.draw_turn_right45(FIELD.copy()), obstacles=obstacles))
229 | cv.waitKey(1)
230 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
231 | sleep(TURN_RIGHT_RATE / 2.0)
232 |
233 | robot.move_forward(steps=1, right_offset=1)
234 | cv.imshow('map', draw_field(field=FIELD.copy(), robot=robot, obstacles=obstacles))
235 | cv.waitKey(1)
236 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
237 | sleep(WALK_FORWARD_RATE + TURN_LEFT_RATE / 2.0)
238 |
239 | num_steps = 2
240 | for _ in range(num_steps):
241 | robot.move_forward(steps=1)
242 | cv.imshow('map', draw_field(field=FIELD.copy(), robot=robot, obstacles=obstacles))
243 | cv.waitKey(1)
244 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
245 | sleep(WALK_FORWARD_RATE / float(num_steps))
246 |
247 | map_obstacle = True
248 |
249 | elif movement_turn_left:
250 | # Turn left
251 | movement_turn_left = False
252 |
253 | robot.turn_left()
254 | cv.imshow('map', draw_field(field=FIELD.copy(), robot=robot, particles=None, obstacles=obstacles))
255 | cv.waitKey(1)
256 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
257 | sleep(TURN_LEFT_RATE)
258 |
259 | elif movement_turn_right:
260 | # Turn right
261 | movement_turn_right = False
262 |
263 | robot.turn_right()
264 | cv.imshow('map', draw_field(field=FIELD.copy(), robot=robot, particles=None, obstacles=obstacles))
265 | cv.waitKey(1)
266 | robot_position_pub.publish(row=robot.row, column=robot.column, angle=robot.angle)
267 | sleep(TURN_RIGHT_RATE)
268 |
269 |             # Reset the dead-reckoning timer after a manual pose update
270 | if particles is not None:
271 | particles.randomize(row=robot.row, column=robot.column, angle=robot.angle)
272 | t0 = time()
273 |
274 | else:
275 | # Modify robot angle for initial positioning
276 | robot.set_init_position(turn_left=(user_input == 13))
277 | # robot.set_init_position(turn_left=(user_input == 91), turn_right=(user_input == 93))
278 |
279 | rate.sleep()
280 |
281 |
--------------------------------------------------------------------------------
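Note: localize() in the node above dead-reckons forward motion: the configured forward walking rate is converted into cells per second (the 0.65 factor is kept verbatim from localize(); it reads as an empirically tuned scaling) and elapsed time is accumulated until at least one whole cell has been covered before particles are resampled. The sketch below (estimate_cells is an illustrative helper, not part of slam_node.py) isolates that loop.

from time import sleep, time

def estimate_cells(walk_forward_rate):
    cells_per_second = 1 / (walk_forward_rate / 0.65)
    t0 = time()
    estimated = 0.0
    while estimated < 1:
        sleep(0.05)                      # stand-in for one spin of the SLAM loop
        now = time()
        estimated += (now - t0) * cells_per_second
        t0 = now
    return int(round(estimated))

# With forward rate 1.0 (config/walking_rate.json), one cell takes roughly 1.5 s.
print(estimate_cells(walk_forward_rate=1.0))

--------------------------------------------------------------------------------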
/src/vision/src/line_segment_detection.py:
--------------------------------------------------------------------------------
1 | from bisect import insort
2 | from enum import Enum
3 | import math
4 | import sys
5 |
6 | import cv2 as cv
7 | import rospy
8 |
9 | from cv_mat import CVMat
10 |
11 |
12 | class LineSegment(object):
13 |
14 | def __init__(self, x1, y1, x2, y2):
15 | pts = sorted([(x1, y1), (x2, y2)], key=lambda pt: pt[0])
16 | self.x1 = pts[0][0]
17 | self.y1 = pts[0][1]
18 | self.x2 = pts[1][0]
19 | self.y2 = pts[1][1]
20 | self.length = self.length(x1=self.x1, y1=self.y1, x2=self.x2, y2=self.y2)  # staticmethod result; now shadows length() on this instance
21 | self.angle = self.angle()  # likewise, the computed value shadows the angle() method
22 | self.position = None
23 |
24 | def __lt__(self, other_line_segment):
25 | return self.length < other_line_segment.length
26 |
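# distance_to_point (below) computes the shortest distance from (pt_x, pt_y) to this
# segment: t is the projection parameter of the point onto the infinite line through
# the two endpoints, so 0 <= t <= 1 means the perpendicular foot lies on the segment
# and the perpendicular distance is used; otherwise the nearer endpoint is used.
# Worked example (illustrative only): for the segment (0, 0)-(4, 0) and the point
# (2, 3), t = 0.5 and the distance is 3; for the point (6, 0), t = 1.5 > 1, so the
# distance falls back to the nearer endpoint (4, 0), giving 2.
# Note the formula assumes a non-degenerate segment, i.e. (x1, y1) != (x2, y2).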
27 | def distance_to_point(self, pt_x=0, pt_y=0):
28 | t = -((self.x1 - pt_x) * (self.x2 - self.x1) + (self.y1 - pt_y) * (self.y2 - self.y1)) / \
29 | (math.pow(self.x2 - self.x1, 2) + math.pow(self.y2 - self.y1, 2))
30 |
31 | if 0 <= t <= 1:
32 | distance = math.fabs((self.x2 - self.x1) * (self.y1 - pt_y) - (self.y2 - self.y1) * (self.x1 - pt_x)) / \
33 | math.sqrt(math.pow(self.x2 - self.x1, 2) + math.pow(self.y2 - self.y1, 2))
34 | else:
35 | distance = min(math.sqrt(math.pow(self.x1 - pt_x, 2) + math.pow(self.y1 - pt_y, 2)),
36 | math.sqrt(math.pow(self.x2 - pt_x, 2) + math.pow(self.y2 - pt_y, 2)))
37 |
38 | return distance
39 |
40 | def to_cv_line(self):
41 | return (self.to_pixel_value(self.x1), self.to_pixel_value(self.y1)), \
42 | (self.to_pixel_value(self.x2), self.to_pixel_value(self.y2))
43 |
44 | def angle(self):
45 | angle = math.atan2((self.y2 - self.y1), (self.x2 - self.x1))
46 |
47 | if self.x2 != self.x1 and (self.y2 - self.y1) / (self.x2 - self.x1) < 0:
48 | angle = -math.fabs(angle)
49 |
50 | return angle
51 |
52 | @staticmethod
53 | def distance(x1, y1, x2=0, y2=0):
54 | return math.sqrt(math.pow(math.fabs(x2 - x1), 2) + math.pow(math.fabs(y2 - y1), 2))
55 |
56 | @staticmethod
57 | def length(x1, y1, x2, y2):
58 | return LineSegment.distance(x1=x1, y1=y1, x2=x2, y2=y2)
59 |
60 | @staticmethod
61 | def to_pixel_value(pt):
62 | return max(0, int(round(pt)))
63 |
64 |
65 | class Corner(object):
66 |
67 | CornerType = Enum('CornerType', 'l_shape, t_shape')
68 |
69 | def __init__(self, x, y, line1, line2=None):
70 | self.x = x
71 | self.y = y
72 |
73 | if line2 is not None:
74 | min_line = min(line1, line2, key=lambda line: line.length)
75 | max_line = line1 if min_line == line2 else line2
76 | else:
77 | min_line = line1
78 | max_line = None
79 |
80 | self.line1 = min_line
81 | self.line2 = max_line
82 |
83 |
84 | class FieldLines(object):
85 |
86 | def __init__(self, lines, l_corners=0, t_corners=0):
87 | self.lines = lines
88 | self.l_corners = l_corners
89 | self.t_corners = t_corners
90 |
91 | def add_l_corner(self, line):
92 | self.lines += [line]
93 | self.l_corners += 1
94 |
95 | def add_t_corner(self):
96 | self.t_corners += 1
97 |
98 | def has_line(self, line):
99 | return line in self.lines
100 |
101 |
102 | class LineSegmentDetector(CVMat):
103 | __OPENCV_LSD = cv.createLineSegmentDetector(_refine=cv.LSD_REFINE_STD)
104 |
105 | def __init__(self, frame):
106 | CVMat.__init__(self, frame=frame)
107 | self.line_segments = []
108 | self.horizontal = []
109 | self.vertical = []
110 | self.field_lines = []
111 | self.extra_lines = []
112 |
113 | def extract_lines(self, thresh_lb, thresh_ub, field):
114 | self.extract_object(thresh_lb=thresh_lb, thresh_ub=thresh_ub, field=field, kernel_size=3)
115 |
116 | @staticmethod
117 | def __filter_by_length(lines, min_length=-1):
118 | # Return only lines longer than min_length, sorted by length (longest first)
119 | filtered_lines = []
120 |
121 | for line in lines:
122 | if LineSegment.length(x1=line[0, 0], y1=line[0, 1], x2=line[0, 2], y2=line[0, 3]) > min_length:
123 | insort(filtered_lines, LineSegment(x1=line[0, 0], y1=line[0, 1], x2=line[0, 2], y2=line[0, 3]))
124 | filtered_lines.reverse()
125 |
126 | return filtered_lines
127 |
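# __categorize_by_angle (below) splits segments into roughly horizontal and roughly
# vertical sets. When the average of the extreme |angle| values exceeds pi/3 the
# frame is dominated by steep lines, so each segment is compared by |angle| against
# a pi/4 offset; otherwise any segment with a positive angle is treated as vertical
# and the rest as horizontal.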
128 | @staticmethod
129 | def __categorize_by_angle(lines):
130 | horizontal = []
131 | vertical = []
132 |
133 | if len(lines) > 0:
134 | # Find the average of the min_angle and max_angle, use that to offset the x-axis
135 | max_angle = max(line.angle for line in lines)
136 | min_angle = min(line.angle for line in lines)
137 | min_max_average = (math.fabs(max_angle) + math.fabs(min_angle)) / 2.0
138 | x_axis_offset = math.pi / 4 if min_max_average > math.pi / 3 else 0
139 |
140 | for line in lines:
141 | angle = line.angle if x_axis_offset == 0 else math.fabs(line.angle)
142 | if angle > x_axis_offset:
143 | vertical += [line]
144 | else:
145 | horizontal += [line]
146 |
147 | return horizontal, vertical
148 |
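# __categorize_by_distance_apart (below) greedily clusters segments that lie close
# to one another. The allowed gap scales with vertical position in the image:
# distance_apart_threshold ranges from 1.0 at the top of the frame to 1.5 at the
# bottom, since lines lower in the image appear further apart.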
149 | @staticmethod
150 | def __categorize_by_distance_apart(lines, max_distance_apart, frame_height):
151 | categorize_distance = []
152 |
153 | for line in lines:
154 | closest_group_index = None
155 | shortest_distance_apart = sys.maxint  # Python 2 (ROS Kinetic); sys.maxsize on Python 3
156 |
157 | # Find a group for the line based on the shortest distance to already-grouped lines
158 | for group_index, group in enumerate(categorize_distance):
159 | for reference_line in group:
160 | distance_apart = min(reference_line.distance_to_point(pt_x=line.x1, pt_y=line.y1),
161 | reference_line.distance_to_point(pt_x=line.x2, pt_y=line.y2),
162 | line.distance_to_point(pt_x=reference_line.x1, pt_y=reference_line.y1),
163 | line.distance_to_point(pt_x=reference_line.x2, pt_y=reference_line.y2))
164 |
165 | # Calculate a new max_distance_apart based on the average value of y
166 | # bottom of image --> larger y --> line segments further apart
167 | average_y = (line.y1 + line.y2 + reference_line.y1 + reference_line.y2) / 4.0
168 | distance_apart_threshold = 1 + average_y / frame_height / 2
169 |
170 | if distance_apart < shortest_distance_apart and \
171 | distance_apart < max_distance_apart * distance_apart_threshold:
172 | closest_group_index = group_index
173 | shortest_distance_apart = distance_apart
174 |
175 | # Add the line to the closest existing group if one was found; otherwise start a new group
176 | if closest_group_index is not None:
177 | categorize_distance[closest_group_index] += [line]
178 | else:
179 | categorize_distance += [[line]]
180 |
181 | return categorize_distance
182 |
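# __merge_grouped_lines__ (below) collapses each cluster into a single segment
# spanning the cluster's bounding box; the sign of the average angle decides which
# diagonal of the bounding box the merged segment follows. Note that clusters
# containing only a single segment are dropped here (len(group) > 1).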
183 | @staticmethod
184 | def __merge_grouped_lines__(groups):
185 | merged_lines = []
186 |
187 | # Find the average of the lines in the group
188 | # Create a new line based on the average
189 | for group in groups:
190 | if len(group) > 1:
191 | x_coordinates = []
192 | y_coordinates = []
193 | average_angle = 0
194 |
195 | for line in group:
196 | x_coordinates += [line.x1, line.x2]
197 | y_coordinates += [line.y1, line.y2]
198 | average_angle += line.angle
199 |
200 | min_x = min(x_coordinates)
201 | max_x = max(x_coordinates)
202 | min_y = min(y_coordinates)
203 | max_y = max(y_coordinates)
204 | angle = average_angle / len(group)
205 |
206 | if angle > 0:
207 | merged_lines += [LineSegment(x1=min_x, y1=min_y, x2=max_x, y2=max_y)]
208 | else:
209 | merged_lines += [LineSegment(x1=min_x, y1=max_y, x2=max_x, y2=min_y)]
210 |
211 | return merged_lines
212 |
213 | def lsd(self, max_distance_apart=10, min_length=15):
214 | # Find all line segment with OpenCV's LSD
215 | lines = self.__OPENCV_LSD.detect(self.frame)[0]
216 |
217 | if lines is not None:
218 | # Filter out lines that do not meet min_length condition
219 | self.line_segments = self.__filter_by_length(lines, min_length=min_length)
220 |
221 | # Classify the lines by angle
222 | horizontal, vertical = self.__categorize_by_angle(self.line_segments)
223 |
224 | # Classify the lines by distance apart
225 | horizontal = self.__categorize_by_distance_apart(horizontal, max_distance_apart, self.frame.shape[0])
226 | vertical = self.__categorize_by_distance_apart(vertical, max_distance_apart, self.frame.shape[0])
227 |
228 | # Find average line in classified groups
229 | horizontal = self.__merge_grouped_lines__(horizontal)
230 | vertical = self.__merge_grouped_lines__(vertical)
231 |
232 | self.horizontal = horizontal
233 | self.vertical = vertical
234 |
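# Corner classification (below): if the corner point lies within max_distance_apart
# of one of the reference line's endpoints, the two lines meet end-to-end (an "L"
# corner); otherwise the corner point touches the reference line mid-segment
# (a "T" corner).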
235 | @staticmethod
236 | def classify_corner_shape(line, point, max_distance_apart):
237 | # Determine whether corner is L shaped or T shaped
238 | distance_apart = min(LineSegment.distance(x1=line.x1, y1=line.y1, x2=point[0], y2=point[1]),
239 | LineSegment.distance(x1=line.x2, y1=line.y2, x2=point[0], y2=point[1]))
240 |
241 | return Corner.CornerType.l_shape if distance_apart <= max_distance_apart else Corner.CornerType.t_shape
242 |
243 | @staticmethod
244 | def __find_corners(lines, reference_lines, max_distance_apart):
245 | l_shape_corners = []
246 | t_shape_corners = []
247 |
248 | remaining_reference_lines = list(reference_lines)
249 |
250 | for reference_line in reference_lines:
251 | for line in lines:
252 | distance_pt1 = reference_line.distance_to_point(pt_x=line.x1, pt_y=line.y1)
253 | distance_pt2 = reference_line.distance_to_point(pt_x=line.x2, pt_y=line.y2)
254 | distance_apart = min(distance_pt1, distance_pt2)
255 |
256 | # Find the shortest distance between the reference line and two endpoints of line
257 | # Classify as corner if distance_apart is shorter than max_distance_apart
258 | if distance_apart < max_distance_apart:
259 | (pt_x, pt_y) = (line.x1, line.y1) if distance_apart == distance_pt1 else (line.x2, line.y2)
260 | corner_type = LineSegmentDetector.classify_corner_shape(line=reference_line, point=(pt_x, pt_y),
261 | max_distance_apart=max_distance_apart)
262 | if corner_type == Corner.CornerType.l_shape:
263 | l_shape_corners += [Corner(x=pt_x, y=pt_y, line1=line, line2=reference_line)]
264 | if reference_line in remaining_reference_lines:
265 | remaining_reference_lines.remove(reference_line)
266 | else:
267 | t_shape_corners += [Corner(x=pt_x, y=pt_y, line1=reference_line)]
268 | if reference_line in remaining_reference_lines:
269 | remaining_reference_lines.remove(reference_line)
270 |
271 | return l_shape_corners, t_shape_corners, remaining_reference_lines
272 |
273 | def find_corners(self, max_distance_apart=5):
274 | self.field_lines = []
275 |
276 | # Find all possible T and L shape corners
277 | horizontal_l_shape, horizontal_t_shape, extra_vertical = self.__find_corners(lines=self.horizontal,
278 | reference_lines=self.vertical,
279 | max_distance_apart=max_distance_apart)
280 | vertical_l_shape, vertical_t_shape, extra_horizontal = self.__find_corners(lines=self.vertical,
281 | reference_lines=self.horizontal,
282 | max_distance_apart=max_distance_apart)
283 |
284 | # Merge L shape corners
285 | l_shape_corners = []
286 | for horizontal in horizontal_l_shape:
287 | for vertical in vertical_l_shape:
288 | distance_apart = LineSegment.distance(x1=horizontal.x, y1=horizontal.y, x2=vertical.x, y2=vertical.y)
289 | if distance_apart < max_distance_apart:
290 | found_matching_line = False
291 |
292 | for existing_l_shape in l_shape_corners:
293 | if existing_l_shape.has_line(horizontal.line1):
294 | existing_l_shape.add_l_corner(horizontal.line2)
295 | found_matching_line = True
296 | elif existing_l_shape.has_line(horizontal.line2):
297 | existing_l_shape.add_l_corner(horizontal.line1)
298 | found_matching_line = True
299 |
300 | if not found_matching_line:
301 | l_shape_corners += [FieldLines(lines=[horizontal.line1, horizontal.line2], l_corners=1)]
302 |
303 | # Merge T shape corners
304 | t_shape_corners = []
305 | for corner in horizontal_t_shape + vertical_t_shape:
306 | found_matching_line = False
307 |
308 | for existing_t_shape in t_shape_corners:
309 | if existing_t_shape.has_line(corner.line1):
310 | existing_t_shape.add_t_corner()
311 | found_matching_line = True
312 |
313 | if not found_matching_line:
314 | t_shape_corners += [FieldLines(lines=[corner.line1], t_corners=1)]
315 |
316 | # Merge T shape corners with L shape corners into lines
317 | for l_shape in l_shape_corners:
318 | for t_shape in list(t_shape_corners):
319 | if l_shape.has_line(t_shape.lines[0]):
320 | l_shape.add_t_corner()
321 | t_shape_corners.remove(t_shape)
322 |
323 | self.field_lines = l_shape_corners + t_shape_corners
324 | self.extra_lines = extra_horizontal + extra_vertical
325 |
326 | def classify_lines(self):
327 | boundary_line = []
328 | goal_area_line = []
329 | center_line = []
330 | undefined_lines = []
331 |
332 | all_lines = [line for line_set in self.field_lines for line in line_set.lines] + self.extra_lines
333 |
334 | # Center line: when many lines are visible, keep only the longest as the center line and discard the rest
335 | if len(all_lines) >= 5 or len(self.extra_lines) >= 3:
336 | center_line = [([max(all_lines, key=lambda line: line.length)], 'center')]
337 | self.field_lines = []
338 | self.extra_lines = []
339 |
340 | # Lines with T corners must be boundary
341 | if len(self.field_lines) > 0:
342 | self.field_lines = sorted(self.field_lines, key=lambda line: line.t_corners, reverse=True)
343 | if 0 < self.field_lines[0].t_corners <= 2:
344 | boundary_line += [(self.field_lines.pop(0).lines, 'T')]
345 |
346 | # Lines with L corners can be boundary or goal area
347 | if len(self.field_lines) > 0:
348 | self.field_lines = sorted(self.field_lines, key=lambda line: line.l_corners, reverse=True)
349 | l_corner_lines = [line for line in self.field_lines if line.l_corners > 0]
350 |
351 | if len(l_corner_lines) > 0:
352 | if len(boundary_line) > 0:
353 | # This L corner must be a goal area since we already have a boundary
354 | goal_area_line += [(self.field_lines.pop(0).lines, 'L')]
355 | elif len(l_corner_lines) == 2:
356 | # The top corner point must be the boundary
357 | # The bottom corner point must be the goal area
358 | line0_y = []
359 | for line in l_corner_lines[0].lines:
360 | line0_y += [line.y1, line.y2]
361 | line0_y = sorted(line0_y)
362 | line1_y = []
363 | for line in l_corner_lines[1].lines:
364 | line1_y += [line.y1, line.y2]
365 | line1_y = sorted(line1_y)
366 |
367 | if line0_y[0] < line1_y[0]:
368 | boundary_line += [(self.field_lines.pop(0).lines, 'L')]
369 | goal_area_line += [(self.field_lines.pop(0).lines, 'L')]
370 | else:
371 | goal_area_line += [(self.field_lines.pop(0).lines, 'L')]
372 | boundary_line += [(self.field_lines.pop(0).lines, 'L')]
373 |
374 | if len(self.field_lines) > 0:
375 | # Lines that are left over are unknown
376 | for line in self.field_lines:
377 | undefined_lines += line.lines
378 | undefined_lines += self.extra_lines
379 |
380 | # Two parallel lines: classify one as the boundary line and the other as the goal area line
381 | if len(undefined_lines) == 2 and len(boundary_line) == 0 and len(goal_area_line) == 0 and \
382 | -0.5 <= math.fabs(undefined_lines[0].angle) - math.fabs(undefined_lines[1].angle) <= 0.5 and \
383 | ((undefined_lines[0].angle >= 0 and undefined_lines[1].angle >= 0) or (undefined_lines[0].angle < 0 and undefined_lines[1].angle < 0)):
384 | line_0_distance = undefined_lines[0].distance_to_point()  # distance to the image origin (top-left corner)
385 | line_1_distance = undefined_lines[1].distance_to_point()
386 |
387 | if undefined_lines[0].angle >= 0.3 or undefined_lines[1].angle >= 0.3:
388 | if line_0_distance < line_1_distance:
389 | boundary_line += [([undefined_lines[1]], 'parallel')]
390 | goal_area_line += [([undefined_lines[0]], 'parallel')]
391 | else:
392 | boundary_line += [([undefined_lines[0]], 'parallel')]
393 | goal_area_line += [([undefined_lines[1]], 'parallel')]
394 | else:
395 | if line_0_distance >= line_1_distance:
396 | boundary_line += [([undefined_lines[1]], 'parallel')]
397 | goal_area_line += [([undefined_lines[0]], 'parallel')]
398 | else:
399 | boundary_line += [([undefined_lines[0]], 'parallel')]
400 | goal_area_line += [([undefined_lines[1]], 'parallel')]
401 |
402 | undefined_lines = []
403 |
404 | if len(undefined_lines) > 0:
405 | undefined_lines = [(undefined_lines, 'unknown')]
406 |
407 | return {
408 | 'boundary': (boundary_line, self.get_position(boundary_line)),
409 | 'goal_area': (goal_area_line, self.get_position(goal_area_line)),
410 | 'center': (center_line, self.get_position(center_line)),
411 | 'undefined': (undefined_lines, self.get_position(undefined_lines))
412 | }
413 |
414 | def get_position(self, lines):
415 | position_text = ''  # position estimation below is currently disabled; an empty string is always returned
416 |
417 | # if len(lines) > 0:
418 | # avg_x = sum(line.x1 + line.x2 for line in lines) / (len(lines) * 2)
419 |
420 | # # Check for direction between self and obstacle
421 | # _, frame_width = self.frame.shape
422 | # left_bound = frame_width * (1.0/4.0)
423 | # right_bound = frame_width * (3.0/4.0)
424 |
425 | # if avg_x <= left_bound:
426 | # position_text = 'left'
427 | # elif avg_x > left_bound and avg_x <= right_bound:
428 | # position_text = 'center'
429 | # elif avg_x > right_bound:
430 | # position_text += 'right'
431 |
432 | return position_text
433 |
434 |
--------------------------------------------------------------------------------