├── .gitignore
├── LICENSE
├── README.md
├── mamp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   └── obstacle.py
│   ├── configs
│   │   ├── __init__.py
│   │   └── config.py
│   ├── envs
│   │   ├── __init__.py
│   │   └── mampenv.py
│   ├── policies
│   │   ├── __init__.py
│   │   ├── kdTree.py
│   │   ├── orca3dPolicy.py
│   │   ├── orca3dPolicyOfficial.py
│   │   ├── rvo3dPolicy.py
│   │   ├── sca
│   │   │   ├── __init__.py
│   │   │   ├── dubinsmaneuver2d.py
│   │   │   ├── dubinsmaneuver3d.py
│   │   │   ├── rvo3dDubinsPolicy.py
│   │   │   └── scaPolicy.py
│   │   └── srvo3dPolicy.py
│   ├── read_map.py
│   └── util.py
├── run_example
│   ├── run_orca.py
│   ├── run_rvo.py
│   ├── run_rvo3dDubins.py
│   ├── run_sca.py
│   └── run_srvo.py
└── visualization
    ├── __init__.py
    ├── draw_episode.py
    ├── draw_path.py
    ├── figs
    │   ├── c1.png
    │   ├── c2.png
    │   ├── example1.gif
    │   ├── example21.gif
    │   ├── example22.gif
    │   ├── example3.png
    │   ├── figexp11.png
    │   ├── figexp12.png
    │   ├── figexp21.png
    │   ├── figexp22.png
    │   ├── figexp31.png
    │   └── figexp32.png
    ├── map
    │   └── map.binvox
    ├── plt3d.py
    └── vis_util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .idea
3 | visualization/orca3d
4 | visualization/rvo3d
5 | visualization/rvo3d_dubins
6 | visualization/sca
7 | visualization/srvo3d
8 |
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Gang Xu
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Shunted Collision Avoidance (SCA) for Multi-Agent Motion Planning in Three Dimensions
2 |
3 | Python implementation of shunted collision avoidance for multi-fixed-wing-UAV motion planning, including our method, shunted collision avoidance (SCA), as well as reciprocal velocity obstacles in 3D domains (RVO3D) and optimal reciprocal collision avoidance in 3D domains (ORCA3D).
4 |
5 | -----
6 |
7 | Description
8 | -----
9 |
10 | We present an approach to collision avoidance for fixed-wing UAVs, where multiple independent UAVs (agents) must avoid collisions without communicating with each other while moving in a common 3D workspace. We name the proposed method shunted collision avoidance (SCA). In addition, we provide the optimal reciprocal collision avoidance (ORCA) and reciprocal velocity obstacles (RVO) methods in 3D domains for comparison.
11 |
12 | About
13 | -----
14 |
15 | **Paper**: Shunted Collision Avoidance for Multi-UAV Motion Planning with Posture Constraints, Gang Xu, Deye Zhu, Junjie Cao\*, Yong Liu\*, and Jian Yang
16 |
17 |
18 | -----
19 |
20 | Requirement
21 | -----
22 |
23 | ```bash
24 | pip install numpy
25 | pip install open3d
26 | pip install pandas
27 | pip install matplotlib
28 | ```
29 |
30 | -----
31 |
32 | Applications
33 | -----
34 |
35 | ```bash
36 | cd run_example
37 | python run_sca.py
38 | ```
39 | For the test, you can select among several scenarios, including circle, random, and take-off and landing (see the sketch below for how the core classes fit together).
40 |
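As a rough illustration of how the core classes fit together, a minimal two-agent run with the ORCA3D policy could look like the sketch below. This is a hedged sketch rather than one of the provided `run_example` scripts; the import paths, the example argument values, and the unused `actions` argument are inferred from the sources in this repository.

```python
# Hypothetical minimal driver; see run_example/ for the real entry points.
import numpy as np
from mamp.agents.agent import Agent
from mamp.envs.mampenv import MACAEnv
from mamp.policies.orca3dPolicy import ORCA3DPolicy

# Poses are [x, y, z, alpha, beta, gamma]; two agents approach head-on and swap positions.
agents = [
    Agent(start_pos=[0.0, 0.0, 5.0, 0.0, 0.0, 0.0], goal_pos=[20.0, 0.0, 5.0, 0.0, 0.0, 0.0],
          vel=[0.0, 0.0, 0.0], radius=0.5, pref_speed=1.0, policy=ORCA3DPolicy, id=0),
    Agent(start_pos=[20.0, 0.0, 5.0, np.pi, 0.0, 0.0], goal_pos=[0.0, 0.0, 5.0, np.pi, 0.0, 0.0],
          vel=[0.0, 0.0, 0.0], radius=0.5, pref_speed=1.0, policy=ORCA3DPolicy, id=1),
]

env = MACAEnv()
env.set_agents(agents, obstacles=[])  # builds the k-d trees internally
for _ in range(2000):
    if env.step(actions=[]):          # each agent's policy computes its own action internally
        break                         # returns True once every agent is done (goal, collision or timeout)
```

Each agent records its trajectory in `agent.history_info`; the directories listed in `.gitignore` (e.g. `visualization/orca3d`, `visualization/sca`) suggest where the run scripts store per-policy results.
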
41 | #### The circle scenario, simulating an air patrol task.
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 | #### The scenario of take-off and landing.
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 | #### The circle scenario, simulating low-altitude flight for a search task.
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 | #### Results of Comparison 1 (Ours: S-RVO3D):
78 |
79 |
80 |
81 |
82 |
83 |
84 | #### Results of Comparison 2 (Ours: SCA):
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 | ----
95 |
96 | Discussion
97 | ----
98 |
99 | In the first comparison, the number of UAVs is 100.
100 |
--------------------------------------------------------------------------------
/mamp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/__init__.py
--------------------------------------------------------------------------------
/mamp/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/agents/__init__.py
--------------------------------------------------------------------------------
/mamp/agents/agent.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from math import pi
4 | from mamp.util import l3norm, l3normsq, takeSecond, sqr
5 | from mamp.configs.config import DT
6 |
7 |
8 | class Agent(object):
9 | def __init__(self, start_pos, goal_pos, vel, radius, pref_speed, policy, id, dt=0.1):
10 | self.group = 0
11 | self.policy = policy()
12 |
13 | self.initial_pos = np.array(start_pos, dtype='float64')
14 | self.goal_pos = np.array(goal_pos, dtype='float64')
15 | self.pos_global_frame = np.array(start_pos[:3], dtype='float64')
16 | self.goal_global_frame = np.array(goal_pos[:3], dtype='float64')
17 |
18 | self.heading_global_frame = np.array(start_pos[3:], dtype='float64')
19 | self.goal_heading_frame = np.array(goal_pos[3:], dtype='float64')
20 | self.initial_heading = np.array(start_pos[3:], dtype='float64')
21 |
22 | self.vel_global_frame = np.array(vel)
23 | self.radius = radius
24 | self.turning_radius = 1.5
25 | self.id = id
26 | self.pref_speed = pref_speed
27 | self.pitchlims = [-pi / 4, pi / 4]
28 | self.min_heading_change = self.pitchlims[0]
29 | self.max_heading_change = self.pitchlims[1]
30 |
31 | self.neighbors = []
32 | self.maxNeighbors = 16
33 | self.neighborDist = 10.0
34 | self.timeStep = DT
35 | self.timeHorizon = 10.0
36 | self.maxSpeed = 1.0
37 | self.maxAccel = 1.0
38 | self.safetyFactor = 7.5
39 | self.is_parallel_neighbor = []
40 | self.is_obstacle = False
41 | self.dt_nominal = DT
42 | self.candinate_num = 256
43 |
44 | self.path = []
45 | self.dubins_path = []
46 | self.travelled_traj_node = []
47 | self.num_node = 10
48 | self.desire_points_num = int(0) # for computing time rate dubins
49 | self.desire_path_length = None # for computing distance rate dubins
50 |
51 | self.straight_path_length = l3norm(start_pos[:3], goal_pos[:3])-0.5 # for computing distance rate
52 | self.desire_steps = int(self.straight_path_length / (pref_speed*DT)) # for computing time rate
53 |
54 | self.dubins_now_goal = None
55 | self.dubins_last_goal = None
56 | self.v_pref = np.array([0.0, 0.0, 0.0], dtype='float64')
57 | self.free_collision_time = int(0)
58 |
59 | self.is_back2start = False
60 | self.total_time = 0.0
61 | self.total_dist = 0.0
62 | self.step_num = 0
63 | # self.current_step = 0
64 | # self.current_run_dist = 0.0
65 |
66 | self.history_pos = {}
67 | self.real_path_length = 0.0 # for computing distance rate
68 | self.is_use_dubins = False
69 | self.ref_plane = 'XOY'
70 | self.is_at_goal = False
71 | self.is_collision = False
72 | self.is_out_of_max_time = False
73 | self.is_run_done = False
74 | self.max_run_dist = 3.0 * l3norm(start_pos[:3], goal_pos[:3])
75 | self.ANIMATION_COLUMNS = ['pos_x', 'pos_y', 'pos_z', 'alpha', 'beta', 'gamma', 'vel_x', 'vel_y', 'vel_z',
76 | 'gol_x', 'gol_y', 'gol_z', 'radius']
77 | self.history_info = pd.DataFrame(columns=self.ANIMATION_COLUMNS)
78 |
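    # Maintain at most maxNeighbors neighbors, kept sorted by squared distance.
    # Once a real collision is detected, the list is rebuilt so that only colliding neighbors are tracked.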
79 | def insertAgentNeighbor(self, other_agent, rangeSq):
80 | if self.id != other_agent.id:
81 | distSq = l3normsq(self.pos_global_frame, other_agent.pos_global_frame)
82 | if distSq < sqr(self.radius + other_agent.radius) and distSq < rangeSq: # COLLISION!
83 | if not self.is_collision:
84 | self.is_collision = True
85 | self.neighbors.clear()
86 |
87 | if len(self.neighbors) == self.maxNeighbors:
88 | self.neighbors.pop()
89 | self.neighbors.append((other_agent, distSq))
90 | self.neighbors.sort(key=takeSecond)
91 | if len(self.neighbors) == self.maxNeighbors:
92 | rangeSq = self.neighbors[-1][1]
93 | elif not self.is_collision and distSq < rangeSq:
94 | if len(self.neighbors) == self.maxNeighbors:
95 | self.neighbors.pop()
96 | self.neighbors.append((other_agent, distSq))
97 | self.neighbors.sort(key=takeSecond)
98 | if len(self.neighbors) == self.maxNeighbors:
99 | rangeSq = self.neighbors[-1][1]
100 |
101 | def insertObstacleNeighbor(self, obstacle, rangeSq):
102 | index = self.id
103 | ob_index = obstacle.id
104 | distSq1 = l3normsq(self.pos_global_frame, obstacle.pos_global_frame)
105 | # Use the distance to the obstacle surface; suitable when the obstacle radius is close to or larger than neighborDist
106 | distSq = (l3norm(self.pos_global_frame, obstacle.pos_global_frame)-obstacle.radius)**2
107 | if distSq1 < sqr(self.radius + obstacle.radius) and distSq < rangeSq: # COLLISION!
108 | if not self.is_collision:
109 | self.is_collision = True
110 | self.neighbors.clear()
111 |
112 | if len(self.neighbors) == self.maxNeighbors:
113 | self.neighbors.pop()
114 | self.neighbors.append((obstacle, distSq))
115 | self.neighbors.sort(key=takeSecond)
116 | if len(self.neighbors) == self.maxNeighbors:
117 | rangeSq = self.neighbors[-1][1]
118 | elif not self.is_collision and distSq < rangeSq:
119 | if len(self.neighbors) == self.maxNeighbors:
120 | self.neighbors.pop()
121 | self.neighbors.append((obstacle, distSq))
122 | self.neighbors.sort(key=takeSecond)
123 | if len(self.neighbors) == self.maxNeighbors:
124 | rangeSq = self.neighbors[-1][1]
125 |
126 | def to_vector(self):
127 | """ Convert the agent's attributes to a single global state vector. """
128 | global_state_dict = {
129 | 'radius': self.radius,
130 | 'pref_speed': self.pref_speed,
131 | 'pos_x': self.pos_global_frame[0],
132 | 'pos_y': self.pos_global_frame[1],
133 | 'pos_z': self.pos_global_frame[2],
134 | 'gol_x': self.goal_global_frame[0],
135 | 'gol_y': self.goal_global_frame[1],
136 | 'gol_z': self.goal_global_frame[2],
137 | 'vel_x': self.vel_global_frame[0],
138 | 'vel_y': self.vel_global_frame[1],
139 | 'vel_z': self.vel_global_frame[2],
140 | 'alpha': self.heading_global_frame[0],
141 | 'beta': self.heading_global_frame[1],
142 | 'gamma': self.heading_global_frame[2],
143 | }
144 | global_state = np.array([val for val in global_state_dict.values()])
145 | animation_columns_dict = {}
146 | for key in self.ANIMATION_COLUMNS:
147 | animation_columns_dict.update({key: global_state_dict[key]})
148 | self.history_info = pd.concat([self.history_info, pd.DataFrame([animation_columns_dict])], ignore_index=True)
--------------------------------------------------------------------------------
/mamp/agents/obstacle.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from math import sqrt
3 |
4 |
5 | class Obstacle(object):
6 | def __init__(self, pos, shape_dict, id):
7 | self.shape = shape = shape_dict['shape']
8 | self.feature = feature = shape_dict['feature']
9 | if shape == 'cube':
10 | self.length, self.width, self.height = shape_dict['feature']
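            # Conservative bounding sphere for the cube: half of its space diagonal.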
11 | self.radius = sqrt(self.length ** 2 + self.width ** 2 + self.height ** 2) / 2
12 | elif shape == 'sphere':
13 | self.radius = shape_dict['feature']
14 | else:
15 | raise NotImplementedError
16 | self.pos_global_frame = np.array(pos, dtype='float64')
17 | self.vel_global_frame = np.array([0.0, 0.0, 0.0])
18 | self.pos = pos
19 | self.id = id
20 | self.t = 0.0
21 | self.step_num = 0
22 | self.is_at_goal = True
23 | self.is_obstacle = True
24 | self.was_in_collision_already = False
25 | self.is_collision = False
26 |
27 | self.x = pos[0]
28 | self.y = pos[1]
29 | self.z = pos[2]
--------------------------------------------------------------------------------
/mamp/configs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/configs/__init__.py
--------------------------------------------------------------------------------
/mamp/configs/config.py:
--------------------------------------------------------------------------------
1 | eps = 10 ** 5 # keep five decimal places (floats are truncated via int(x * eps) / eps)
2 | DT = 0.1
3 | NEAR_GOAL_THRESHOLD = 0.5
4 | rvo3d_epsilon = 1e-5
5 |
--------------------------------------------------------------------------------
/mamp/envs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/envs/__init__.py
--------------------------------------------------------------------------------
/mamp/envs/mampenv.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy as np
3 | from math import sqrt
4 | from mamp.util import pi_2_pi, l3norm
5 | from mamp.policies.kdTree import KDTree
6 | from mamp.configs.config import NEAR_GOAL_THRESHOLD
7 |
8 |
9 | class MACAEnv(object):
10 | def __init__(self):
11 | self.agents = None
12 | self.centralized_planner = None
13 | self.obstacles = []
14 | self.kdTree = None
15 |
16 | def set_agents(self, agents, obstacles=None):
17 | self.agents = agents
18 | self.obstacles = obstacles
19 | self.kdTree = KDTree(self.agents, self.obstacles)
20 | self.kdTree.buildObstacleTree()
21 |
22 | def step(self, actions):
23 | self._take_action(actions)
24 | which_agents_done = self.is_done(NEAR_GOAL_THRESHOLD)
25 | return which_agents_done
26 |
27 | def _take_action(self, actions):
28 | self.kdTree.buildAgentTree()
29 |
30 | num_actions_per_agent = 7 # vx, vy, vz, speed, alpha, beta, gamma
31 | all_actions = np.zeros((len(self.agents), num_actions_per_agent), dtype=np.float32)
32 |
33 | # update for velocity and position
34 | for agent_index, agent in enumerate(self.agents):
35 | if agent.is_at_goal or agent.is_collision or agent.is_out_of_max_time:
36 | continue
37 | other_agents = copy.copy(self.agents)
38 | other_agents.remove(agent)
39 | dict_comm = {'other_agents': other_agents, 'obstacles': self.obstacles}
40 | all_actions[agent_index, :] = agent.policy.find_next_action(dict_comm, agent, self.kdTree)
41 |
42 | for i, agent in enumerate(self.agents):
43 | update_velocitie(agent, all_actions[i, :])
44 | if not agent.is_at_goal:
45 | agent.step_num += 1
46 | self.check_agent_state(agent) # check collision
47 | for i, agent in enumerate(self.agents):
48 | if agent.is_collision:
49 | print('agent' + str(agent.id) + ': collision')
50 |
51 | def is_done(self, near_goal_threshold):
52 | for ag in self.agents:
53 | if l3norm(ag.pos_global_frame, ag.goal_global_frame) <= near_goal_threshold:
54 | ag.is_at_goal = True
55 | if ag.is_at_goal or ag.is_collision or ag.is_out_of_max_time:
56 | ag.is_run_done = True
57 | is_done_condition = np.array([ag.is_run_done for ag in self.agents])
58 | check_is_done_condition = np.logical_and.reduce(is_done_condition)
59 | return check_is_done_condition
60 |
61 | def check_agent_state(self, agent):
62 | # check collision
63 | for ob in self.obstacles:
64 | dis_a_ob = l3norm(agent.pos_global_frame, ob.pos_global_frame)
65 | if dis_a_ob <= (agent.radius + ob.radius):
66 | agent.is_collision = True
67 | # print('collision: agent' + str(agent.id) + ' and obstacle' + str(ob.id))
68 | for ag in self.agents:
69 | if ag.id == agent.id: continue
70 | dis_a_agent = l3norm(agent.pos_global_frame, ag.pos_global_frame)
71 | if dis_a_agent <= (agent.radius + ag.radius):
72 | if not ag.is_at_goal:
73 | ag.is_collision = True
74 | if not agent.is_at_goal:
75 | agent.is_collision = True
76 | # print('collision: agent' + str(agent.id) + ' and agent' + str(ag.id))
77 | if agent.total_dist > agent.max_run_dist:
78 | # print(agent.total_dist, agent.max_run_dist)
79 | agent.is_out_of_max_time = True
80 | print(' agent' + str(agent.id) + ' is_out_of_max_time')
81 |
82 |
83 | def update_velocitie(agent, action):
84 | selected_speed = action[3]
85 |
86 | selected_alpha_heading = pi_2_pi(agent.heading_global_frame[0] + action[4])
87 | selected_beta_heading = pi_2_pi(agent.heading_global_frame[1] + action[5])
88 | selected_gamma_heading = pi_2_pi(agent.heading_global_frame[2] + action[6])
89 |
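    # Convert the commanded speed and the updated heading (alpha: azimuth/yaw, beta: elevation/pitch)
    # into a Cartesian displacement over one nominal time step.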
90 | dx = selected_speed * np.cos(selected_beta_heading) * np.cos(selected_alpha_heading) * agent.dt_nominal
91 | dy = selected_speed * np.cos(selected_beta_heading) * np.sin(selected_alpha_heading) * agent.dt_nominal
92 | dz = selected_speed * np.sin(selected_beta_heading) * agent.dt_nominal
93 | length = sqrt(dx ** 2 + dy ** 2 + dz ** 2)
94 | agent.total_dist += length
95 | agent.pos_global_frame += np.array([dx, dy, dz])
96 |
97 | if len(agent.travelled_traj_node) == agent.num_node:
98 | agent.travelled_traj_node.pop(0)
99 | agent.travelled_traj_node.append(agent.pos_global_frame)
100 | else:
101 | agent.travelled_traj_node.append(agent.pos_global_frame)
102 |
103 | agent.heading_global_frame = [selected_alpha_heading, selected_beta_heading, selected_gamma_heading]
104 | agent.vel_global_frame = np.array(action[:3])
105 | agent.to_vector()
106 |
107 |
--------------------------------------------------------------------------------
/mamp/policies/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/policies/__init__.py
--------------------------------------------------------------------------------
/mamp/policies/kdTree.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: KDTree 3D
5 | @ github: https://github.com/snape/RVO2-3D
6 | """
7 |
8 | import numpy as np
9 | from math import sqrt, sin, cos, atan2, asin, pi, floor, acos
10 | from itertools import combinations, product
11 | from mamp.util import l2norm, leftOf, det, l2normsq, sqr
12 |
13 |
14 | class AgentTreeNode(object):
15 | def __init__(self):
16 | self.begin = 0
17 | self.end = 0
18 | self.left = 0
19 | self.right = 0
20 | self.maxCoord = np.array([0.0, 0.0, 0.0])
21 | self.minCoord = np.array([0.0, 0.0, 0.0])
22 |
23 |
24 | class ObstacleTreeNode(object):
25 | def __init__(self):
26 | self.begin = 0
27 | self.end = 0
28 | self.left = 0
29 | self.right = 0
30 | self.maxCoord = np.array([0.0, 0.0, 0.0])
31 | self.minCoord = np.array([0.0, 0.0, 0.0])
32 |
33 |
34 | class KDTree(object):
35 | def __init__(self, agents, obstacles):
36 | self.agents = agents
37 | self.obstacles = obstacles
38 | self.agent_tree_node = AgentTreeNode()
39 | self.obstacle_tree_node = ObstacleTreeNode()
40 | self.agentTree = []
41 | for i in range(2 * len(agents) - 1):
42 | self.agentTree.append(AgentTreeNode())
43 | self.agentIDs = []
44 | for obj in agents:
45 | self.agentIDs.append(obj.id)
46 |
47 | self.obstacleTree = []
48 | for i in range(2 * len(obstacles) - 1):
49 | self.obstacleTree.append(ObstacleTreeNode())
50 | self.obstacleIDs = []
51 | for obj in obstacles:
52 | self.obstacleIDs.append(obj.id)
53 | self.max_leaf_size = 10
54 | self.epsilon = 1e-5
55 |
56 | def buildAgentTree(self):
57 | if self.agentIDs:
58 | self.buildAgentTreeRecursive(0, len(self.agentIDs), 0)
59 |
60 | def buildAgentTreeRecursive(self, begin, end, node):
61 | self.agentTree[node].begin = begin
62 | self.agentTree[node].end = end
63 | # Assign element-wise rather than binding the arrays directly; otherwise minCoord/maxCoord would alias the agent's position array and change along with it
64 | self.agentTree[node].minCoord[0] = self.agents[self.agentIDs[begin]].pos_global_frame[0]
65 | self.agentTree[node].minCoord[1] = self.agents[self.agentIDs[begin]].pos_global_frame[1]
66 | self.agentTree[node].minCoord[2] = self.agents[self.agentIDs[begin]].pos_global_frame[2]
67 | self.agentTree[node].maxCoord[0] = self.agents[self.agentIDs[begin]].pos_global_frame[0]
68 | self.agentTree[node].maxCoord[1] = self.agents[self.agentIDs[begin]].pos_global_frame[1]
69 | self.agentTree[node].maxCoord[2] = self.agents[self.agentIDs[begin]].pos_global_frame[2]
70 |
71 | for i in range(begin + 1, end):
72 | self.agentTree[node].maxCoord[0] = max(self.agentTree[node].maxCoord[0],
73 | self.agents[self.agentIDs[i]].pos_global_frame[0])
74 | self.agentTree[node].minCoord[0] = min(self.agentTree[node].minCoord[0],
75 | self.agents[self.agentIDs[i]].pos_global_frame[0])
76 | self.agentTree[node].maxCoord[1] = max(self.agentTree[node].maxCoord[1],
77 | self.agents[self.agentIDs[i]].pos_global_frame[1])
78 | self.agentTree[node].minCoord[1] = min(self.agentTree[node].minCoord[1],
79 | self.agents[self.agentIDs[i]].pos_global_frame[1])
80 | self.agentTree[node].maxCoord[2] = max(self.agentTree[node].maxCoord[2],
81 | self.agents[self.agentIDs[i]].pos_global_frame[2])
82 | self.agentTree[node].minCoord[2] = min(self.agentTree[node].minCoord[2],
83 | self.agents[self.agentIDs[i]].pos_global_frame[2])
84 |
85 | if end - begin > self.max_leaf_size: # No leaf node
86 | dif0 = self.agentTree[node].maxCoord[0] - self.agentTree[node].minCoord[0]
87 | dif1 = self.agentTree[node].maxCoord[1] - self.agentTree[node].minCoord[1]
88 | dif2 = self.agentTree[node].maxCoord[2] - self.agentTree[node].minCoord[2]
89 | if dif0 > dif1 and dif0 > dif2:
90 | coord = 0
91 | elif dif1 > dif2:
92 | coord = 1
93 | else:
94 | coord = 2
95 |
96 | splitValue = 0.5 * (self.agentTree[node].maxCoord[coord] + self.agentTree[node].minCoord[coord])
97 |
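            # Partition the agent IDs in [begin, end) so that agents with coordinate < splitValue
            # come first; the two halves become the left and right subtrees.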
98 | left = begin
99 | right = end
100 |
101 | while left < right:
102 | while left < right and self.agents[self.agentIDs[left]].pos_global_frame[coord] < splitValue:
103 | left += 1
104 |
105 | while right > left and self.agents[self.agentIDs[right - 1]].pos_global_frame[coord] >= splitValue:
106 | right -= 1
107 |
108 | if left < right:
109 | self.agentIDs[left], self.agentIDs[right - 1] = self.agentIDs[right - 1], self.agentIDs[left]
110 | left += 1
111 | right -= 1
112 | leftSize = left - begin
113 | if leftSize == 0:
114 | leftSize += 1
115 | left += 1
116 | right += 1
117 |
118 | self.agentTree[node].left = node + 1
119 | self.agentTree[node].right = node + 2 * leftSize # node + 1 + (2 * leftsize - 1)
120 |
121 | self.buildAgentTreeRecursive(begin, left, self.agentTree[node].left)
122 | self.buildAgentTreeRecursive(left, end, self.agentTree[node].right)
123 |
124 | def computeAgentNeighbors(self, agent, rangeSq):
125 | self.queryAgentTreeRecursive(agent, rangeSq, 0)
126 |
127 | def queryAgentTreeRecursive(self, agent, rangeSq, node):
128 | if self.agentTree[node].end - self.agentTree[node].begin <= self.max_leaf_size:
129 | for i in range(self.agentTree[node].begin, self.agentTree[node].end):
130 | agent.insertAgentNeighbor(self.agents[self.agentIDs[i]], rangeSq)
131 | else:
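            # Squared distance from the agent to each child's axis-aligned bounding box;
            # the closer child is searched first and children farther than rangeSq are pruned.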
132 | distSqLeft = (
133 | sqr(max(0.0, self.agentTree[self.agentTree[node].left].minCoord[0] - agent.pos_global_frame[0])) +
134 | sqr(max(0.0, agent.pos_global_frame[0] - self.agentTree[self.agentTree[node].left].maxCoord[0])) +
135 | sqr(max(0.0, self.agentTree[self.agentTree[node].left].minCoord[1] - agent.pos_global_frame[1])) +
136 | sqr(max(0.0, agent.pos_global_frame[1] - self.agentTree[self.agentTree[node].left].maxCoord[1])) +
137 | sqr(max(0.0, self.agentTree[self.agentTree[node].left].minCoord[2] - agent.pos_global_frame[2])) +
138 | sqr(max(0.0, agent.pos_global_frame[2] - self.agentTree[self.agentTree[node].left].maxCoord[2])))
139 | distSqRight = (
140 | sqr(max(0.0, self.agentTree[self.agentTree[node].right].minCoord[0] - agent.pos_global_frame[0])) +
141 | sqr(max(0.0, agent.pos_global_frame[0] - self.agentTree[self.agentTree[node].right].maxCoord[0])) +
142 | sqr(max(0.0, self.agentTree[self.agentTree[node].right].minCoord[1] - agent.pos_global_frame[1])) +
143 | sqr(max(0.0, agent.pos_global_frame[1] - self.agentTree[self.agentTree[node].right].maxCoord[1])) +
144 | sqr(max(0.0, self.agentTree[self.agentTree[node].right].minCoord[2] - agent.pos_global_frame[2])) +
145 | sqr(max(0.0, agent.pos_global_frame[2] - self.agentTree[self.agentTree[node].right].maxCoord[2])))
146 |
147 | if distSqLeft < distSqRight:
148 | if distSqLeft < rangeSq:
149 | self.queryAgentTreeRecursive(agent, rangeSq, self.agentTree[node].left)
150 | if distSqRight < rangeSq:
151 | self.queryAgentTreeRecursive(agent, rangeSq, self.agentTree[node].right)
152 | else:
153 | if distSqRight < rangeSq:
154 | self.queryAgentTreeRecursive(agent, rangeSq, self.agentTree[node].right)
155 | if distSqLeft < rangeSq:
156 | self.queryAgentTreeRecursive(agent, rangeSq, self.agentTree[node].left)
157 |
158 | def buildObstacleTree(self):
159 | if self.obstacleIDs:
160 | self.buildObstacleTreeRecursive(0, len(self.obstacleIDs), 0)
161 |
162 | def buildObstacleTreeRecursive(self, begin, end, node):
163 | # print(begin, end, node)  # debug output
164 | self.obstacleTree[node].begin = begin
165 | self.obstacleTree[node].end = end
166 | # Assign element-wise rather than binding the arrays directly; otherwise minCoord/maxCoord would alias the obstacle's position array and change along with it
167 | self.obstacleTree[node].minCoord[0] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[0]
168 | self.obstacleTree[node].minCoord[1] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[1]
169 | self.obstacleTree[node].minCoord[2] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[2]
170 | self.obstacleTree[node].maxCoord[0] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[0]
171 | self.obstacleTree[node].maxCoord[1] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[1]
172 | self.obstacleTree[node].maxCoord[2] = self.obstacles[self.obstacleIDs[begin]].pos_global_frame[2]
173 |
174 | for i in range(begin + 1, end):
175 | self.obstacleTree[node].maxCoord[0] = max(self.obstacleTree[node].maxCoord[0],
176 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[0])
177 | self.obstacleTree[node].minCoord[0] = min(self.obstacleTree[node].minCoord[0],
178 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[0])
179 | self.obstacleTree[node].maxCoord[1] = max(self.obstacleTree[node].maxCoord[1],
180 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[1])
181 | self.obstacleTree[node].minCoord[1] = min(self.obstacleTree[node].minCoord[1],
182 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[1])
183 | self.obstacleTree[node].maxCoord[2] = max(self.obstacleTree[node].maxCoord[2],
184 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[2])
185 | self.obstacleTree[node].minCoord[2] = min(self.obstacleTree[node].minCoord[2],
186 | self.obstacles[self.obstacleIDs[i]].pos_global_frame[2])
187 |
188 | if end - begin > self.max_leaf_size: # No leaf node
189 | dif0 = self.obstacleTree[node].maxCoord[0] - self.obstacleTree[node].minCoord[0]
190 | dif1 = self.obstacleTree[node].maxCoord[1] - self.obstacleTree[node].minCoord[1]
191 | dif2 = self.obstacleTree[node].maxCoord[2] - self.obstacleTree[node].minCoord[2]
192 | if dif0 > dif1 and dif0 > dif2:
193 | coord = 0
194 | elif dif1 > dif2:
195 | coord = 1
196 | else:
197 | coord = 2
198 |
199 | splitValue = 0.5 * (self.obstacleTree[node].maxCoord[coord] + self.obstacleTree[node].minCoord[coord])
200 |
201 | left = begin
202 | right = end
203 |
204 | while left < right:
205 | while left < right and self.obstacles[self.obstacleIDs[left]].pos_global_frame[coord] < splitValue:
206 | left += 1
207 |
208 | while right > left and self.obstacles[self.obstacleIDs[right - 1]].pos_global_frame[
209 | coord] >= splitValue:
210 | right -= 1
211 |
212 | if left < right:
213 | self.obstacleIDs[left], self.obstacleIDs[right - 1] = self.obstacleIDs[right - 1], self.obstacleIDs[
214 | left]
215 | left += 1
216 | right -= 1
217 | leftSize = left - begin
218 | if leftSize == 0:
219 | leftSize += 1
220 | left += 1
221 | right += 1
222 |
223 | self.obstacleTree[node].left = node + 1
224 | self.obstacleTree[node].right = node + 2 * leftSize # node + 1 + (2 * leftsize - 1)
225 |
226 | self.buildObstacleTreeRecursive(begin, left, self.obstacleTree[node].left)
227 | self.buildObstacleTreeRecursive(left, end, self.obstacleTree[node].right)
228 |
229 | def computeObstacleNeighbors(self, agent, rangeSq):
230 | self.queryObstacleTreeRecursive(agent, rangeSq, 0)
231 |
232 | def queryObstacleTreeRecursive(self, agent, rangeSq, node):
233 | if self.obstacleTree:
234 | if self.obstacleTree[node].end - self.obstacleTree[node].begin <= self.max_leaf_size:
235 | for i in range(self.obstacleTree[node].begin, self.obstacleTree[node].end):
236 | agent.insertObstacleNeighbor(self.obstacles[self.obstacleIDs[i]], rangeSq)
237 | else:
238 | distSqLeft = (
239 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].left].minCoord[0] - agent.pos_global_frame[0])) +
240 | sqr(max(0.0, agent.pos_global_frame[0] - self.obstacleTree[self.obstacleTree[node].left].maxCoord[0])) +
241 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].left].minCoord[1] - agent.pos_global_frame[1])) +
242 | sqr(max(0.0, agent.pos_global_frame[1] - self.obstacleTree[self.obstacleTree[node].left].maxCoord[1])) +
243 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].left].minCoord[2] - agent.pos_global_frame[2])) +
244 | sqr(max(0.0, agent.pos_global_frame[2] - self.obstacleTree[self.obstacleTree[node].left].maxCoord[2])))
245 | distSqRight = (
246 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].right].minCoord[0] - agent.pos_global_frame[0])) +
247 | sqr(max(0.0, agent.pos_global_frame[0] - self.obstacleTree[self.obstacleTree[node].right].maxCoord[0])) +
248 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].right].minCoord[1] - agent.pos_global_frame[1])) +
249 | sqr(max(0.0, agent.pos_global_frame[1] - self.obstacleTree[self.obstacleTree[node].right].maxCoord[1])) +
250 | sqr(max(0.0, self.obstacleTree[self.obstacleTree[node].right].minCoord[2] - agent.pos_global_frame[2])) +
251 | sqr(max(0.0, agent.pos_global_frame[2] - self.obstacleTree[self.obstacleTree[node].right].maxCoord[2])))
252 |
253 | if distSqLeft < distSqRight:
254 | if distSqLeft < rangeSq:
255 | self.queryObstacleTreeRecursive(agent, rangeSq, self.obstacleTree[node].left)
256 | if distSqRight < rangeSq:
257 | self.queryObstacleTreeRecursive(agent, rangeSq, self.obstacleTree[node].right)
258 | else:
259 | if distSqRight < rangeSq:
260 | self.queryObstacleTreeRecursive(agent, rangeSq, self.obstacleTree[node].right)
261 | if distSqLeft < rangeSq:
262 | self.queryObstacleTreeRecursive(agent, rangeSq, self.obstacleTree[node].left)
263 |
--------------------------------------------------------------------------------
/mamp/policies/orca3dPolicy.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: ORCA(RVO2) 3D
5 | @ reference: Reciprocal n-Body Collision Avoidance*
6 | @ github: https://github.com/snape/RVO2-3D
7 | """
8 | import time
9 | import numpy as np
10 | from math import sqrt, sin, cos, asin, acos, pi
11 | from mamp.configs.config import eps, DT
12 | from mamp.util import distance, absSq, sqr, normalize, l3norm, satisfied_constraint, reached, cartesian2spherical
13 |
14 |
15 | class Line(object):
16 | def __init__(self):
17 | self.direction = np.array([0.0, 0.0, 1.0]) # The direction of the directed line.
18 | self.point = np.array([0.0, 0.0, 0.0]) # A point on the directed line.
19 |
20 |
21 | class Plane(object):
22 | def __init__(self):
23 | self.point = np.array([0.0, 0.0, 0.0]) # A point on the plane.
24 | self.normal = np.array([0.0, 0.0, 1.0]) # The normal to the plane.
25 |
26 |
27 | class ORCA3DPolicy(object):
28 | """ ORCA3DPolicy """
29 |
30 | def __init__(self):
31 | self.now_goal = None
32 | self.update_nowGoal_dist = 1.0
33 | self.orcaPlanes = []
34 | self.type = "internal"
35 | self.rvo3d_epsilon = 1e-5
36 | self.new_velocity = None
37 |
38 | def find_next_action(self, dict_comm, agent, kdTree):
39 | """
40 | Function: ORCA3D compute suitable speed for agents
41 | """
42 | start_t = time.time()
43 |
44 | self.orcaPlanes.clear()
45 | invTimeHorizon = 1.0 / agent.timeHorizon
46 | self.new_velocity = np.array([0.0, 0.0, 0.0])
47 |
48 | self.get_trajectory(agent)
49 | v_pref = compute_v_pref(self.now_goal, agent)
50 |
51 | computeNeighbors(agent, kdTree)
52 |
53 | if distance(agent.vel_global_frame, [0.0, 0.0, 0.0]) <= 1e-5: # first step: the agent is still at rest, so bootstrap with a fraction of v_pref
54 | vA_post = 0.3 * v_pref
55 |
56 | else:
57 | for obj in agent.neighbors:
58 | obj = obj[0]
59 | relativePosition = obj.pos_global_frame - agent.pos_global_frame
60 | relativeVelocity = agent.vel_global_frame - obj.vel_global_frame
61 | distSq = absSq(relativePosition)
62 | agent_rad = agent.radius + 0.05
63 | obj_rad = obj.radius + 0.05
64 | combinedRadius = agent_rad + obj_rad
65 | combinedRadiusSq = sqr(combinedRadius)
66 |
67 | plane = Plane()
68 | if distSq > combinedRadiusSq: # No collision.
69 | # Vector from cutoff center to relative velocity.
70 | w = relativeVelocity - invTimeHorizon * relativePosition
71 | wLengthSq = absSq(w)
72 | dotProduct = np.dot(w, relativePosition)
73 | if dotProduct < 0.0 and sqr(dotProduct) > combinedRadiusSq * wLengthSq:
74 | # Project on cut-off circle.
75 | wLength = sqrt(wLengthSq)
76 | unitW = w / wLength
77 |
78 | plane.normal = unitW
79 | u = (combinedRadius * invTimeHorizon - wLength) * unitW
80 | # print("Project on cut-off circle.")
81 | else:
82 | # Project on cone.
83 | difSq = distSq - combinedRadiusSq
84 | dot_product = np.dot(relativePosition, relativeVelocity)
85 | wwSq = absSq(np.cross(relativePosition, relativeVelocity)) / difSq # squared radius of the projection circle
86 | pApBLength = np.linalg.norm(relativePosition)
87 | pAp1Length = dot_product / pApBLength # p1 is the foot of the perpendicular from vA - vB onto the line pB - pA
88 | p1otLength = sqrt(wwSq) * (combinedRadius / pApBLength) # ot is the center of the corresponding circle
89 | pAotlength = pAp1Length + p1otLength
90 |
91 | t = pAotlength / pApBLength
92 | ww = relativeVelocity - t * relativePosition
93 | wwLength = np.linalg.norm(ww)
94 | unitWW = ww / wwLength
95 | plane.normal = unitWW
96 | u = (combinedRadius * t - wwLength) * unitWW
97 | # print("Project on cone. t = ", t, R1, RR)
98 | else: # Collision.
99 | invTimeStep = 1.0 / agent.timeStep
100 | w = relativeVelocity - invTimeStep * relativePosition
101 | wLength = np.linalg.norm(w)
102 | unitW = w / wLength
103 | plane.normal = unitW
104 | u = (combinedRadius * invTimeStep - wLength) * unitW
105 | # print('Collision.')
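                # Reciprocity: each agent takes only half of the correction u (ORCA's 0.5 factor),
                # so the pair shares the avoidance effort equally.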
106 | plane.point = agent.vel_global_frame + 0.5 * u
107 | self.orcaPlanes.append([plane, combinedRadius, relativePosition, obj.vel_global_frame])
108 |
109 | vA_post = intersect(agent, v_pref, self.orcaPlanes)
110 |
111 | action = cartesian2spherical(agent, vA_post)
112 |
113 | end_t = time.time()
114 | cost_step = end_t - start_t
115 | agent.total_time += cost_step
116 |
117 | dist = round(distance(agent.pos_global_frame, agent.goal_global_frame), 5)
118 | print('agent' + str(agent.id), len(agent.neighbors), round(action[3], 5), 'distance to goal:', dist)
119 |
120 | return action
121 |
122 | def linearProgram1(self, planes, planeNo, line, maxSpeed, vel_pref, dir_opt):
123 | dotProduct = np.dot(line.point, line.direction)
124 | discriminant = sqr(dotProduct) + sqr(maxSpeed) - absSq(line.point)
125 |
126 | if discriminant < 0.0:
127 | # Max speed sphere fully invalidates line.
128 | return False
129 |
130 | sqrtDiscriminant = sqrt(discriminant)
131 | tLeft = -dotProduct - sqrtDiscriminant
132 | tRight = -dotProduct + sqrtDiscriminant
133 |
134 | for i in range(planeNo):
135 | numerator = np.dot((planes[i].point - line.point), planes[i].normal)
136 | denominator = np.dot(line.direction, planes[i].normal)
137 |
138 | if sqr(denominator) <= self.rvo3d_epsilon:
139 | # Line is (almost) parallel to plane i.
140 | if numerator > 0.0:
141 | return False
142 | else:
143 | continue
144 |
145 | t = numerator / denominator
146 |
147 | if denominator >= 0.0:
148 | # Plane i bounds line on the left.
149 | tLeft = max(tLeft, t)
150 | else:
151 | # Plane i bounds line on the right.
152 | tRight = min(tRight, t)
153 |
154 | if tLeft > tRight:
155 | return False
156 |
157 | if dir_opt:
158 | # Optimize direction.
159 | if np.dot(vel_pref, line.direction) > 0.0:
160 | # Take right extreme.
161 | self.new_velocity = line.point + tRight * line.direction
162 |
163 | else:
164 | # Take left extreme.
165 | self.new_velocity = line.point + tLeft * line.direction
166 | else:
167 | # Optimize closest point.
168 | t = np.dot(line.direction, (vel_pref - line.point))
169 |
170 | if t < tLeft:
171 | self.new_velocity = line.point + tLeft * line.direction
172 | elif t > tRight:
173 | self.new_velocity = line.point + tRight * line.direction
174 | else:
175 | self.new_velocity = line.point + t * line.direction
176 |
177 | return True
178 |
179 | def linearProgram2(self, planes, planeNo, maxSpeed, vel_pref, dir_opt):
180 | planeDist = np.dot(planes[planeNo].point, planes[planeNo].normal)
181 | planeDistSq = sqr(planeDist)
182 | radiusSq = sqr(maxSpeed)
183 |
184 | if planeDistSq > radiusSq:
185 | # Max speed sphere fully invalidates plane planeNo.
186 | return False
187 |
188 | planeRadiusSq = radiusSq - planeDistSq
189 |
190 | planeCenter = planeDist * planes[planeNo].normal
191 |
192 | if dir_opt:
193 | # Project direction opt_vel on plane planeNo.
194 | planeOptVelocity = vel_pref - np.dot(vel_pref, planes[planeNo].normal) * planes[planeNo].normal
195 | planeOptVelocityLengthSq = absSq(planeOptVelocity)
196 |
197 | if planeOptVelocityLengthSq <= self.rvo3d_epsilon:
198 | self.new_velocity = planeCenter
199 | else:
200 | self.new_velocity = planeCenter + sqrt(planeRadiusSq / planeOptVelocityLengthSq) * planeOptVelocity
201 |
202 | else:
203 | # Project point optVelocity on plane planeNo.
204 | dot_product = np.dot((planes[planeNo].point - vel_pref), planes[planeNo].normal)
205 | self.new_velocity = vel_pref + dot_product * planes[planeNo].normal
206 |
207 | # If outside planeCircle, project on planeCircle.
208 | if absSq(self.new_velocity) > radiusSq:
209 | planeResult = self.new_velocity - planeCenter
210 | planeResultLengthSq = absSq(planeResult)
211 | self.new_velocity = planeCenter + sqrt(planeRadiusSq / planeResultLengthSq) * planeResult
212 |
213 | for i in range(planeNo):
214 | if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > 0.0:
215 | # Result does not satisfy constraint i. Compute new optimal result.
216 | # Compute intersection line of plane i and plane planeNo.
217 | crossProduct = np.cross(planes[i].normal, planes[planeNo].normal)
218 |
219 | if absSq(crossProduct) <= self.rvo3d_epsilon:
220 | # Planes planeNo and i are (almost) parallel, and plane i fully invalidates plane planeNo.
221 | return False
222 |
223 | line = Line()
224 | line.direction = crossProduct / np.linalg.norm(crossProduct)
225 | lineNormal = np.cross(line.direction, planes[planeNo].normal)
226 | dot_product1 = np.dot((planes[i].point - planes[planeNo].point), planes[i].normal)
227 | dot_product2 = np.dot(lineNormal, planes[i].normal)
228 | line.point = planes[planeNo].point + (dot_product1 / dot_product2) * lineNormal
229 |
230 | if not self.linearProgram1(planes, i, line, maxSpeed, vel_pref, dir_opt):
231 | return False
232 |
233 | return True
234 |
235 | def linearProgram3(self, planes, maxSpeed, vel_pref, dir_opt=False):
236 | if dir_opt:
237 | # Optimize direction. Note that the optimization velocity is of unit length in this case.
238 | self.new_velocity = vel_pref * maxSpeed
239 | elif absSq(vel_pref) > sqr(maxSpeed):
240 | # Optimize closest point and outside circle.
241 | self.new_velocity = (vel_pref / np.linalg.norm(vel_pref)) * maxSpeed
242 | else:
243 | # Optimize closest point and inside circle.
244 | self.new_velocity = vel_pref
245 |
246 | for i in range(len(planes)):
247 | if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > 0.0: # new_velocity is not inside the ORCA half-space
248 | # Result does not satisfy constraint i. Compute new optimal result.
249 | tempResult = self.new_velocity
250 |
251 | if not self.linearProgram2(planes, i, maxSpeed, vel_pref, dir_opt):
252 | self.new_velocity = tempResult
253 | return i # no fully feasible solution found; find_next_action can fall back to linearProgram4
254 |
255 | return len(planes) # all constraints satisfied; optimal solution found
256 |
257 | def linearProgram4(self, planes, beginPlane, radius):
258 |
259 | for i in range(beginPlane, len(planes)):
260 | if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > 0.0:
261 | # Result does not satisfy constraint of plane i.
262 | projPlanes = []
263 |
264 | for j in range(i):
265 | plane = Plane()
266 |
267 | crossProduct = np.cross(planes[j].normal, planes[i].normal)
268 |
269 | if absSq(crossProduct) <= self.rvo3d_epsilon:
270 | # Plane i and plane j are (almost) parallel.
271 | if np.dot(planes[i].normal, planes[j].normal) > 0.0:
272 | # Plane i and plane j point in the same direction.
273 | continue
274 | else:
275 | # Plane i and plane j point in opposite direction.
276 | plane.point = 0.5 * (planes[i].point + planes[j].point)
277 |
278 | else:
279 | # Plane.point is point on line of intersection between plane i and plane j.
280 | lineNormal = np.cross(crossProduct, planes[i].normal)
281 | dot_product1 = np.dot((planes[j].point - planes[i].point), planes[j].normal)
282 | dot_product2 = np.dot(lineNormal, planes[j].normal)
283 | plane.point = planes[i].point + (dot_product1 / dot_product2) * lineNormal
284 |
285 | plane.normal = normalize(planes[j].normal - planes[i].normal)
286 | projPlanes.append(plane)
287 |
288 | tempResult = self.new_velocity
289 |
290 | if self.linearProgram3(projPlanes, radius, planes[i].normal, dir_opt=True) < len(projPlanes):
291 | '''This should in principle not happen. The result is by definition already
292 | in the feasible region of this linear program. If it fails, it is due to small
293 | floating point error, and the current result is kept.'''
294 | self.new_velocity = tempResult
295 |
296 | dist = np.dot(planes[i].normal, (planes[i].point - self.new_velocity))
297 |
298 | def get_trajectory(self, agent): # global planning added A* planner
299 | if agent.path:
300 | if self.now_goal is None: # first
301 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
302 | dis = distance(agent.pos_global_frame, self.now_goal)
303 | dis_nowgoal_globalgoal = distance(self.now_goal, agent.goal_global_frame)
304 | dis_nowgoal_globalpos = distance(agent.pos_global_frame, agent.goal_global_frame)
305 | if dis <= self.update_nowGoal_dist * agent.radius: # free collision
306 | if agent.path:
307 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
308 | elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos: # free back to current now_goal when free collision
309 | if agent.path:
310 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
311 | else:
312 | self.now_goal = agent.goal_global_frame
313 |
314 |
315 | def is_intersect(pApB, combined_radius, v_dif):
316 | pAB = pApB
317 | dist_pAB = np.linalg.norm(pAB)
318 | if dist_pAB <= combined_radius:
319 | dist_pAB = combined_radius
320 | theta_pABBound = asin(combined_radius / dist_pAB)
321 | theta_pABvCand = acos(np.dot(pAB, v_dif) / (dist_pAB * np.linalg.norm(v_dif)))
322 | if theta_pABBound <= theta_pABvCand: # no intersection, or tangent
323 | return False
324 | else:
325 | return True
326 |
327 |
328 | def is_inORCA(orcaPlane, combined_radius, new_v):
329 | relativeVelocity = new_v - orcaPlane.point
330 | if np.dot(relativeVelocity, orcaPlane.normal) >= 0.0: # new_v lies on or inside the feasible side of the ORCA plane
331 | return True
332 | else:
333 | return False
334 |
335 |
336 | def computeNeighbors(agent, kdTree):
337 | if agent.is_collision:
338 | return
339 |
340 | agent.neighbors.clear()
341 | rangeSq = agent.neighborDist ** 2
342 | # check obstacle neighbors
343 | kdTree.computeObstacleNeighbors(agent, rangeSq)
344 | # check other agents
345 | kdTree.computeAgentNeighbors(agent, rangeSq)
346 |
347 |
348 | def compute_v_pref(goal, agent):
349 | if agent.desire_path_length is None:
350 | agent.desire_path_length = distance(agent.pos_global_frame, agent.goal_global_frame) - 0.5
351 | agent.desire_points_num = agent.desire_path_length / DT
352 | dif_x = goal - agent.pos_global_frame
353 | norm = int(distance(dif_x, [0, 0, 0]) * eps) / eps
354 | norm_dif_x = dif_x * agent.pref_speed / norm
355 | v_pref = np.array(norm_dif_x)
356 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
357 | v_pref[0] = 0.0
358 | v_pref[1] = 0.0
359 | v_pref[2] = 0.0
360 | agent.v_pref = v_pref
361 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
362 | return V_des
363 |
364 |
365 | def compute_newV_is_suit(agent, orcaPlanes, new_v):
366 | suit = True
367 | if len(orcaPlanes) == 0:
368 | if not satisfied_constraint(agent, new_v):
369 | suit = False
370 | return suit
371 |
372 | for orcaOb in orcaPlanes:
373 | orcaPlane = orcaOb[0]
374 | combined_radius = orcaOb[1]
375 | if not is_inORCA(orcaPlane, combined_radius, new_v) or not satisfied_constraint(agent, new_v):
376 | suit = False
377 | break
378 | return suit
379 |
380 |
381 | def compute_without_suitV(agent, orcaPlanes, unsuit_v):
382 | tc = []
383 | for orcaOB in orcaPlanes:
384 | combined_radius = orcaOB[1]
385 | pApB = orcaOB[2]
386 | vA = agent.vel_global_frame
387 | vB = orcaOB[3]
388 | v_dif = np.array(unsuit_v - 0.5 * (vA + vB)) if np.linalg.norm(vB) > 1e-5 else np.array(unsuit_v)
389 | if is_intersect(pApB, combined_radius, v_dif) and satisfied_constraint(agent, unsuit_v):
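            # Time to collision: smallest positive root of ||pApB - t * v_dif|| = combined_radius
            # (a ray-sphere intersection), used to penalize velocities that collide sooner.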
390 | discr = sqr(np.dot(v_dif, pApB)) - absSq(v_dif) * (absSq(pApB) - sqr(combined_radius))
391 | tc_v = (np.dot(v_dif, pApB) - sqrt(discr)) / absSq(v_dif)
392 | if tc_v < 0:
393 | tc_v = 0.0
394 | tc.append(tc_v)
395 | if len(tc) == 0:
396 | tc = [0.0]
397 | return tc
398 |
399 |
400 | def intersect(agent, v_pref, orcaPlanes):
401 | num_N = 256
402 | param_phi = (sqrt(5.0) - 1.0) / 2.0
403 | min_speed = 0.5
404 | suitable_V = []
405 | unsuitable_V = []
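    # Sample candidate velocities on spheres of radius rad along a Fibonacci (golden-angle) spiral,
    # which spreads the directions nearly uniformly over the 3D sphere.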
406 | for rad in np.arange(min_speed, agent.pref_speed + 0.03, agent.pref_speed - min_speed):
407 | for n in range(1, num_N + 1):
408 | z_n = (2 * n - 1) / num_N - 1
409 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * n * param_phi)
410 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * n * param_phi)
411 | new_v = np.array([rad * x_n, rad * y_n, rad * z_n])
412 | suit = compute_newV_is_suit(agent, orcaPlanes, new_v)
413 | if suit:
414 | suitable_V.append(new_v)
415 | else:
416 | unsuitable_V.append(new_v)
417 | new_v = v_pref[:]
418 | suit = compute_newV_is_suit(agent, orcaPlanes, new_v)
419 | if suit:
420 | suitable_V.append(new_v)
421 | else:
422 | unsuitable_V.append(new_v)
423 | # ----------------------
424 | if suitable_V:
425 | # print('111--------------------Suitable be found', 'agent'+str(agent.id), len(suitable_V), len(unsuitable_V))
426 | suitable_V.sort(key=lambda v: l3norm(v, v_pref)) # sort begin at minimum and end at maximum
427 | vA_post = suitable_V[0]
428 | else:
429 | # print('222-------------------Suitable not found', 'agent'+str(agent.id), len(suitable_V), len(unsuitable_V))
430 | tc_V = dict()
431 | for unsuit_v in unsuitable_V:
432 | unsuit_v = np.array(unsuit_v)
433 | tc_V[tuple(unsuit_v)] = 0
434 | tc = compute_without_suitV(agent, orcaPlanes, unsuit_v)
435 | tc_V[tuple(unsuit_v)] = min(tc) + 1e-5
436 | WT = 0.2
437 | vA_post = min(unsuitable_V, key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
438 | vA_post = np.array([int(vA_post[0] * eps) / eps, int(vA_post[1] * eps) / eps, int(vA_post[2] * eps) / eps])
439 | return vA_post
440 |
441 |
442 | if __name__ == "__main__":
443 | pass
444 |
--------------------------------------------------------------------------------
/mamp/policies/orca3dPolicyOfficial.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: ORCA(RVO2) 3D
5 | @ reference: Reciprocal n-Body Collision Avoidance*
6 | @ github: https://github.com/snape/RVO2-3D
7 | """
8 | import time
9 | import numpy as np
10 | from math import sqrt, atan2
11 | from mamp.configs.config import eps, DT, rvo3d_epsilon
12 | from mamp.util import distance, absSq, sqr, normalize, reached
13 |
14 |
15 | class Line(object):
16 | def __init__(self):
17 | self.direction = np.array([0.0, 0.0, 1.0]) # The direction of the directed line.
18 | self.point = np.array([0.0, 0.0, 0.0]) # A point on the directed line.
19 |
20 |
21 | class Plane(object):
22 | def __init__(self):
23 | self.point = np.array([0.0, 0.0, 0.0]) # A point on the plane.
24 | self.normal = np.array([0.0, 0.0, 1.0]) # The normal to the plane.
25 |
26 |
27 | class ORCA3DPolicy(object):
28 | """ ORCA3DPolicy """
29 |
30 | def __init__(self):
31 | self.now_goal = None
32 | self.update_nowGoal_dist = 1.0
33 | self.orcaPlanes = []
34 | self.type = "internal"
35 | self.new_velocity = None
36 |
37 | def find_next_action(self, dict_comm, agent, kdTree):
38 | """
39 | Function: ORCA3D compute suitable speed for agents
40 | """
41 | start_t = time.time()
42 |
43 | self.orcaPlanes.clear()
44 | invTimeHorizon = 1.0 / agent.timeHorizon
45 | self.new_velocity = np.array([0.0, 0.0, 0.0])
46 |
47 | self.get_trajectory(agent)
48 | v_pref = compute_v_pref(self.now_goal, agent)
49 |
50 | computeNeighbors(agent, kdTree)
51 |
52 | if distance(agent.vel_global_frame, [0.0, 0.0, 0.0]) <= 1e-5: # first step: the agent is still at rest, so bootstrap with a fraction of v_pref
53 | vA_post = 0.3 * v_pref
54 |
55 | else:
56 | for obj in agent.neighbors:
57 | obj = obj[0]
58 | relativePosition = obj.pos_global_frame - agent.pos_global_frame
59 | relativeVelocity = agent.vel_global_frame - obj.vel_global_frame
60 | distSq = absSq(relativePosition)
61 | agent_rad = agent.radius + 0.05
62 | obj_rad = obj.radius + 0.05
63 | combinedRadius = agent_rad + obj_rad
64 | combinedRadiusSq = sqr(combinedRadius)
65 |
66 | plane = Plane()
67 | if distSq > combinedRadiusSq: # No collision.
68 | # Vector from cutoff center to relative velocity.
69 | w = relativeVelocity - invTimeHorizon * relativePosition
70 | wLengthSq = absSq(w)
71 | dotProduct = np.dot(w, relativePosition)
72 | if dotProduct < 0.0 and sqr(dotProduct) > combinedRadiusSq * wLengthSq:
73 | # Project on cut-off circle.
74 | wLength = sqrt(wLengthSq)
75 | unitW = w / wLength
76 |
77 | plane.normal = unitW
78 | u = (combinedRadius * invTimeHorizon - wLength) * unitW
79 | # print("Project on cut-off circle.")
80 | else:
81 | # Project on cone.
82 | difSq = distSq - combinedRadiusSq
83 | dot_product = np.dot(relativePosition, relativeVelocity)
84 | wwSq = absSq(np.cross(relativePosition, relativeVelocity)) / difSq # squared radius of the projection circle
85 | pApBLength = np.linalg.norm(relativePosition)
86 | pAp1Length = dot_product / pApBLength # p1 is the foot of the perpendicular from vA - vB onto the line pB - pA
87 | p1otLength = sqrt(wwSq) * (combinedRadius / pApBLength) # ot is the center of the corresponding circle
88 | pAotlength = pAp1Length + p1otLength
89 |
90 | t = pAotlength / pApBLength
91 | ww = relativeVelocity - t * relativePosition
92 | wwLength = np.linalg.norm(ww)
93 | unitWW = ww / wwLength
94 | plane.normal = unitWW
95 | u = (combinedRadius * t - wwLength) * unitWW
96 | # print("Project on cone. t = ", t, R1, RR)
97 | else: # Collision.
98 | invTimeStep = 1.0 / agent.timeStep
99 | w = relativeVelocity - invTimeStep * relativePosition
100 | wLength = np.linalg.norm(w)
101 | unitW = w / wLength
102 | plane.normal = unitW
103 | u = (combinedRadius * invTimeStep - wLength) * unitW
104 | # print('Collision.')
105 | plane.point = agent.vel_global_frame + 0.5 * u
106 | self.orcaPlanes.append(plane)
107 |
108 | planeFail = self.linearProgram3(self.orcaPlanes, agent.maxSpeed, v_pref)
109 |
110 | if planeFail < len(self.orcaPlanes):
111 | self.linearProgram4(self.orcaPlanes, planeFail, agent.maxSpeed)
112 |
113 | vA_post = np.array([self.new_velocity[0], self.new_velocity[1], self.new_velocity[2]])
114 |
115 | action = cartesian2spherical(agent, vA_post)
116 |
117 | end_t = time.time()
118 | cost_step = end_t - start_t
119 | agent.total_time += cost_step
120 |
121 | dist = round(distance(agent.pos_global_frame, agent.goal_global_frame), 5)
122 | print('agent' + str(agent.id), len(agent.neighbors), round(action[3], 5), 'distance to goal:', dist)
123 |
124 | return action
125 |
126 | def linearProgram1(self, planes, planeNo, line, maxSpeed, vel_pref, dir_opt):
127 | dotProduct = np.dot(line.point, line.direction)
128 | discriminant = sqr(dotProduct) + sqr(maxSpeed) - absSq(line.point)
129 |
130 | if discriminant < 0.0:
131 | # Max speed sphere fully invalidates line.
132 | return False
133 |
134 | sqrtDiscriminant = sqrt(discriminant)
135 | tLeft = -dotProduct - sqrtDiscriminant
136 | tRight = -dotProduct + sqrtDiscriminant
137 |
138 | for i in range(planeNo):
139 | numerator = np.dot((planes[i].point - line.point), planes[i].normal)
140 | denominator = np.dot(line.direction, planes[i].normal)
141 |
142 | if sqr(denominator) <= rvo3d_epsilon:
143 | # Line is (almost) parallel to plane i.
144 | if numerator > 0.0:
145 | return False
146 | else:
147 | continue
148 |
149 | t = numerator / denominator
150 |
151 | if denominator >= 0.0:
152 | # Plane i bounds line on the left.
153 | tLeft = max(tLeft, t)
154 | else:
155 | # Plane i bounds line on the right.
156 | tRight = min(tRight, t)
157 |
158 | if tLeft > tRight:
159 | return False
160 |
161 | if dir_opt:
162 | # Optimize direction.
163 | if np.dot(vel_pref, line.direction) > 0.0:
164 | # Take right extreme.
165 | self.new_velocity = line.point + tRight * line.direction
166 |
167 | else:
168 | # Take left extreme.
169 | self.new_velocity = line.point + tLeft * line.direction
170 | else:
171 | # Optimize closest point.
172 | t = np.dot(line.direction, (vel_pref - line.point))
173 |
174 | if t < tLeft:
175 | self.new_velocity = line.point + tLeft * line.direction
176 | elif t > tRight:
177 | self.new_velocity = line.point + tRight * line.direction
178 | else:
179 | self.new_velocity = line.point + t * line.direction
180 |
181 | return True
182 |
183 | def linearProgram2(self, planes, planeNo, maxSpeed, vel_pref, dir_opt):
184 | planeDist = np.dot(planes[planeNo].point, planes[planeNo].normal)
185 | planeDistSq = sqr(planeDist)
186 | radiusSq = sqr(maxSpeed)
187 |
188 | if planeDistSq > radiusSq:
189 | # Max speed sphere fully invalidates plane planeNo.
190 | return False
191 |
192 | planeRadiusSq = radiusSq - planeDistSq
193 |
194 | planeCenter = planeDist * planes[planeNo].normal
195 |
196 | if dir_opt:
197 | # Project direction opt_vel on plane planeNo.
198 | planeOptVelocity = vel_pref - np.dot(vel_pref, planes[planeNo].normal) * planes[planeNo].normal
199 | planeOptVelocityLengthSq = absSq(planeOptVelocity)
200 |
201 | if planeOptVelocityLengthSq <= rvo3d_epsilon:
202 | self.new_velocity = planeCenter
203 | else:
204 | self.new_velocity = planeCenter + sqrt(planeRadiusSq / planeOptVelocityLengthSq) * planeOptVelocity
205 |
206 | else:
207 | # Project point optVelocity on plane planeNo.
208 | dot_product = np.dot((planes[planeNo].point - vel_pref), planes[planeNo].normal)
209 | self.new_velocity = vel_pref + dot_product * planes[planeNo].normal
210 |
211 | # If outside planeCircle, project on planeCircle.
212 | if absSq(self.new_velocity) > radiusSq:
213 | planeResult = self.new_velocity - planeCenter
214 | planeResultLengthSq = absSq(planeResult)
215 | self.new_velocity = planeCenter + sqrt(planeRadiusSq / planeResultLengthSq) * planeResult
216 |
217 | for i in range(planeNo):
218 | if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > 0.0:
219 | # Result does not satisfy constraint i. Compute new optimal result.
220 | # Compute intersection line of plane i and plane planeNo.
221 | crossProduct = np.cross(planes[i].normal, planes[planeNo].normal)
222 |
223 | if absSq(crossProduct) <= rvo3d_epsilon:
224 | # Planes planeNo and i are (almost) parallel, and plane i fully invalidates plane planeNo.
225 | return False
226 |
227 | line = Line()
228 | line.direction = normalize(crossProduct)
229 | lineNormal = np.cross(line.direction, planes[planeNo].normal)
230 | dot_product1 = np.dot((planes[i].point - planes[planeNo].point), planes[i].normal)
231 | dot_product2 = np.dot(lineNormal, planes[i].normal)
232 | line.point = planes[planeNo].point + (dot_product1 / dot_product2) * lineNormal
233 |
234 | if not self.linearProgram1(planes, i, line, maxSpeed, vel_pref, dir_opt):
235 | return False
236 |
237 | return True
238 |
239 | def linearProgram3(self, planes, maxSpeed, vel_pref, dir_opt=False):
240 | if dir_opt:
241 | # Optimize direction. Note that the optimization velocity is of unit length in this case.
242 | self.new_velocity = vel_pref * maxSpeed
243 | elif absSq(vel_pref) > sqr(maxSpeed):
244 | # Optimize closest point and outside circle.
245 | self.new_velocity = (vel_pref / np.linalg.norm(vel_pref)) * maxSpeed
246 | else:
247 | # Optimize closest point and inside circle.
248 | self.new_velocity = vel_pref
249 |
250 | for i in range(len(planes)):
251 |             if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > 0.0:  # new_velocity lies outside the ORCA feasible region
252 | # Result does not satisfy constraint i. Compute new optimal result.
253 | tempResult = self.new_velocity
254 |
255 | if not self.linearProgram2(planes, i, maxSpeed, vel_pref, dir_opt):
256 | self.new_velocity = tempResult
257 |                     return i  # no feasible optimum found; find_next_action falls back to linearProgram4
258 |
259 |         return len(planes)  # all constraints satisfied; the optimum has been found
260 |
261 | def linearProgram4(self, planes, beginPlane, radius):
262 |         distance = 0.0
263 |         for i in range(beginPlane, len(planes)):
264 |             if np.dot(planes[i].normal, (planes[i].point - self.new_velocity)) > distance:
265 | # Result does not satisfy constraint of plane i.
266 | projPlanes = []
267 |
268 | for j in range(i):
269 | plane = Plane()
270 |
271 | crossProduct = np.cross(planes[j].normal, planes[i].normal)
272 |
273 | if absSq(crossProduct) <= rvo3d_epsilon:
274 | # Plane i and plane j are (almost) parallel.
275 | if np.dot(planes[i].normal, planes[j].normal) > 0.0:
276 | # Plane i and plane j point in the same direction.
277 | continue
278 | else:
279 | # Plane i and plane j point in opposite direction.
280 | plane.point = 0.5 * (planes[i].point + planes[j].point)
281 |
282 | else:
283 | # Plane.point is point on line of intersection between plane i and plane j.
284 | lineNormal = np.cross(crossProduct, planes[i].normal)
285 | dot_product1 = np.dot((planes[j].point - planes[i].point), planes[j].normal)
286 | dot_product2 = np.dot(lineNormal, planes[j].normal)
287 | plane.point = planes[i].point + (dot_product1 / dot_product2) * lineNormal
288 |
289 | plane.normal = normalize(planes[j].normal - planes[i].normal)
290 | projPlanes.append(plane)
291 |
292 | tempResult = self.new_velocity
293 |
294 | if self.linearProgram3(projPlanes, radius, planes[i].normal, dir_opt=True) < len(projPlanes):
295 |                     '''This should in principle not happen. The result is by definition already
296 |                     in the feasible region of this linear program. If it fails, it is due to small
297 |                     floating point error, and the current result is kept.'''
298 | self.new_velocity = tempResult
299 |
300 |                 distance = np.dot(planes[i].normal, (planes[i].point - self.new_velocity))  # tighten the violation threshold, as in the reference implementation
301 |
302 |     def get_trajectory(self, agent):  # follow waypoints from the global path (e.g., produced by an A* planner)
303 | if agent.path:
304 | if self.now_goal is None: # first
305 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
306 | dis = distance(agent.pos_global_frame, self.now_goal)
307 | dis_nowgoal_globalgoal = distance(self.now_goal, agent.goal_global_frame)
308 | dis_nowgoal_globalpos = distance(agent.pos_global_frame, agent.goal_global_frame)
309 |             if dis <= self.update_nowGoal_dist * agent.radius:  # within the update distance of the current waypoint: advance to the next one
310 | if agent.path:
311 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
312 |             elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos:  # the current waypoint is no closer to the goal than the agent (already passed): advance
313 | if agent.path:
314 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
315 | else:
316 | self.now_goal = agent.goal_global_frame
317 |
318 |
319 | def computeNeighbors(agent, kdTree):
320 | if agent.is_collision:
321 | return
322 |
323 | agent.neighbors.clear()
324 | rangeSq = agent.neighborDist ** 2
325 | # check obstacle neighbors
326 | kdTree.computeObstacleNeighbors(agent, rangeSq)
327 | # check other agents
328 | kdTree.computeAgentNeighbors(agent, rangeSq)
329 |
330 |
331 | def cartesian2spherical(agent, vA_post):
332 | speed = distance(vA_post, [0, 0, 0])
333 | if speed < 0.001:
334 | alpha = 0.0
335 | beta = 0.0
336 | gamma = 0.0
337 | else:
338 | alpha = atan2(vA_post[1], vA_post[0]) - agent.heading_global_frame[0]
339 | beta = atan2(vA_post[2], sqrt(pow(vA_post[0], 2) + pow(vA_post[1], 2))) - agent.heading_global_frame[1]
340 | gamma = 0.0
341 | action = [vA_post[0], vA_post[1], vA_post[2], speed, alpha, beta, gamma]
342 | return action
343 |
344 |
345 | def compute_v_pref(goal, agent):
346 | if agent.desire_path_length is None:
347 | agent.desire_path_length = distance(agent.pos_global_frame, agent.goal_global_frame) - 0.5
348 | agent.desire_points_num = agent.desire_path_length / DT
349 | dif_x = goal - agent.pos_global_frame
350 | norm = int(distance(dif_x, [0, 0, 0]) * eps) / eps
351 | norm_dif_x = dif_x * agent.pref_speed / norm
352 | v_pref = np.array(norm_dif_x)
353 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
354 | v_pref[0] = 0.0
355 | v_pref[1] = 0.0
356 | v_pref[2] = 0.0
357 | agent.v_pref = v_pref
358 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
359 | return V_des
360 |
361 |
362 | if __name__ == "__main__":
363 | pass
364 |
--------------------------------------------------------------------------------
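The half-space bookkeeping in linearProgram1-4 above reduces to one geometric step per violated constraint. Below is a minimal, self-contained sketch (standalone NumPy, not part of the repo; clip_to_plane_and_sphere is a hypothetical helper name) of what linearProgram2 does for a violated ORCA plane: project the preferred velocity onto the plane, then, if the speed limit is exceeded, onto the circle where the plane cuts the max-speed sphere.

import numpy as np
from math import sqrt

def clip_to_plane_and_sphere(v_pref, plane_point, plane_normal, max_speed):
    # Project v_pref onto the plane n.(v - p) = 0, then onto the circle where the
    # plane cuts the max-speed sphere if the speed limit is exceeded.
    n = plane_normal / np.linalg.norm(plane_normal)
    plane_dist = np.dot(plane_point, n)                # signed distance of the plane from the origin
    if plane_dist ** 2 > max_speed ** 2:
        return None                                    # the speed sphere never reaches the plane
    v = v_pref + np.dot(plane_point - v_pref, n) * n   # projection onto the plane
    if np.dot(v, v) <= max_speed ** 2:
        return v
    center = plane_dist * n                            # center of the plane/sphere intersection circle
    radial = v - center
    return center + sqrt(max_speed ** 2 - plane_dist ** 2) * radial / np.linalg.norm(radial)

print(clip_to_plane_and_sphere(np.array([1.0, 0.0, 0.0]),
                               np.array([0.0, 0.5, 0.0]),
                               np.array([0.0, 1.0, 0.0]), 1.0))
# -> approximately [0.866 0.5 0.], i.e. on the plane and at the speed limit

linearProgram3 repeats this per plane, and linearProgram4 relaxes the planes when no common feasible velocity exists.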
/mamp/policies/rvo3dPolicy.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: RVO3D for multi-agent motion planning
5 | @ reference: Reciprocal Velocity Obstacles for Real-Time Multi-Agent Navigation
6 | @ github: https://github.com/MengGuo/RVO_Py_MAS
7 | """
8 | import time
9 | import numpy as np
10 | from math import sqrt, sin, cos, acos, pi
11 | from mamp.configs.config import eps, DT
12 | from mamp.util import sqr, absSq, l3norm, cartesian2spherical, is_intersect, reached, satisfied_constraint
13 |
14 |
15 | class RVO3DPolicy(object):
16 | """ RVO3DPolicy """
17 |
18 | def __init__(self):
19 | self.now_goal = None
20 | self.update_now_goal_dist = 1.0
21 | self.type = "internal"
22 |
23 | def find_next_action(self, dict_comm, agent, kdTree):
24 | """
25 | Function: RVO3D compute suitable speed for agents
26 | """
27 | start_t = time.time()
28 | self.get_trajectory(agent) # update now_goal
29 | v_pref = compute_v_pref(self.now_goal, agent)
30 | vA = agent.vel_global_frame
31 | if l3norm(vA, [0, 0, 0]) <= 1e-5:
32 | vA_post = 0.3 * v_pref
33 | action = cartesian2spherical(agent, vA_post)
34 | end_t = time.time()
35 | delta_t = end_t - start_t
36 | agent.total_time += delta_t
37 |             theta = 0.0  # vA is (near) zero here, so the heading-change check is skipped
38 | else:
39 | pA = agent.pos_global_frame
40 | RVO_BA_all = []
41 | agent_rad = agent.radius + 0.05
42 | computeNeighbors(agent, kdTree)
43 |
44 | # for obj in other_objects:
45 | for obj in agent.neighbors:
46 | obj = obj[0]
47 | pB = obj.pos_global_frame
48 | if obj.is_at_goal:
49 | transl_vB_vA = pA
50 | else:
51 | vB = obj.vel_global_frame
52 | transl_vB_vA = pA + 0.5 * (vB + vA) # use RVO
53 | obj_rad = obj.radius + 0.05
54 |
55 | RVO_BA = [transl_vB_vA, pA, pB, obj_rad + agent_rad]
56 | RVO_BA_all.append(RVO_BA)
57 | vA_post = intersect(v_pref, RVO_BA_all, agent)
58 | action = cartesian2spherical(agent, vA_post)
59 | end_t = time.time()
60 | delta_t = end_t - start_t
61 | agent.total_time += delta_t
62 | theta = acos(min(np.dot(vA, action[:3]) / (np.linalg.norm(vA) * np.linalg.norm(action[:3])), 1.0))
63 |
64 | dist = round(l3norm(agent.pos_global_frame, agent.goal_global_frame), 5)
65 | if theta > agent.max_heading_change:
66 |             print('-------------agent' + str(agent.id), len(agent.neighbors), theta, action[3], 'distance to goal:', dist)
67 | else:
68 |             print('agent' + str(agent.id), len(agent.neighbors), action[3], 'distance to goal:', dist)
69 | return action
70 |
71 | def get_trajectory(self, agent):
72 | if agent.path:
73 | if self.now_goal is None: # first
74 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
75 | dis = l3norm(agent.pos_global_frame, self.now_goal)
76 | dis_nowgoal_globalgoal = l3norm(self.now_goal, agent.goal_global_frame)
77 | dis_nowgoal_globalpos = l3norm(agent.pos_global_frame, agent.goal_global_frame)
78 |             if dis <= self.update_now_goal_dist * agent.radius:  # within the update distance of the current waypoint: advance to the next one
79 | if agent.path:
80 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
81 |             elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos:  # the current waypoint is no closer to the goal than the agent (already passed): advance
82 | if agent.path:
83 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
84 | else:
85 | self.now_goal = agent.goal_global_frame
86 |
87 |
88 | def computeNeighbors(agent, kdTree):
89 | if agent.is_collision:
90 | return
91 |
92 | agent.neighbors.clear()
93 | rangeSq = agent.neighborDist ** 2
94 |
95 | # check obstacle neighbors
96 | kdTree.computeObstacleNeighbors(agent, rangeSq)
97 |
98 | # check other agents
99 | kdTree.computeAgentNeighbors(agent, rangeSq)
100 |
101 |
102 | def compute_without_suitV(agent, RVO_BA_all, unsuit_v):
103 | tc = []
104 | for RVO_BA in RVO_BA_all:
105 | p_0 = RVO_BA[0]
106 | pA = RVO_BA[1]
107 | pB = RVO_BA[2]
108 | combined_radius = RVO_BA[3]
109 | v_dif = np.array(unsuit_v + pA - p_0)
110 | pApB = pB - pA
111 | if is_intersect(pA, pB, combined_radius, v_dif) and satisfied_constraint(agent, unsuit_v):
112 | discr = sqr(np.dot(v_dif, pApB)) - absSq(v_dif) * (absSq(pApB) - sqr(combined_radius))
113 | tc_v = (np.dot(v_dif, pApB) - sqrt(discr)) / absSq(v_dif)
114 | if tc_v < 0:
115 | tc_v = 0.0
116 | tc.append(tc_v)
117 | if len(tc) == 0:
118 | tc = [0.0]
119 | return tc
120 |
121 |
122 | def compute_newV_is_suit(agent, RVO_BA_all, new_v):
123 | suit = True
124 | if len(RVO_BA_all) == 0:
125 | if not satisfied_constraint(agent, new_v):
126 | suit = False
127 | return suit
128 |
129 | for RVO_BA in RVO_BA_all:
130 | p_0 = RVO_BA[0]
131 | pA = RVO_BA[1]
132 | pB = RVO_BA[2]
133 | combined_radius = RVO_BA[3]
134 | v_dif = np.array(new_v + pA - p_0) # new_v-0.5*(vA+vB) or new_v-vB
135 | if is_intersect(pA, pB, combined_radius, v_dif) or not satisfied_constraint(agent, new_v):
136 | suit = False
137 | break
138 | return suit
139 |
140 |
141 | def intersect(v_pref, RVO_BA_all, agent):
142 | num_N = 256
143 | param_phi = (sqrt(5.0) - 1.0) / 2.0
144 | min_speed = 0.5
145 | suitable_V = []
146 | unsuitable_V = []
147 | for rad in np.arange(min_speed, agent.pref_speed + 0.03, agent.pref_speed - min_speed):
148 | for n in range(1, num_N + 1):
149 | z_n = (2 * n - 1) / num_N - 1
150 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * n * param_phi)
151 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * n * param_phi)
152 | new_v = np.array([rad * x_n, rad * y_n, rad * z_n])
153 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
154 | if suit:
155 | suitable_V.append(new_v)
156 | else:
157 | unsuitable_V.append(new_v)
158 | new_v = v_pref[:]
159 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
160 | if suit:
161 | suitable_V.append(new_v)
162 | else:
163 | unsuitable_V.append(new_v)
164 | # ----------------------
165 | if suitable_V:
166 | suitable_V.sort(key=lambda v: l3norm(v, v_pref)) # sort begin at minimum and end at maximum
167 | vA_post = suitable_V[0]
168 | else:
169 | # print('--------------------Suitable not found', 'agent', agent.id, len(suitable_V), len(unsuitable_V))
170 | tc_V = dict()
171 | for unsuit_v in unsuitable_V:
172 | unsuit_v = np.array(unsuit_v)
173 | tc_V[tuple(unsuit_v)] = 0
174 | tc = compute_without_suitV(agent, RVO_BA_all, unsuit_v)
175 | tc_V[tuple(unsuit_v)] = min(tc) + 1e-5
176 | WT = 0.2
177 | vA_post = min(unsuitable_V, key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
178 | vA_post = np.array([int(vA_post[0] * eps) / eps, int(vA_post[1] * eps) / eps, int(vA_post[2] * eps) / eps])
179 | return vA_post
180 |
181 |
182 | def compute_v_pref(goal, agent):
183 | if agent.desire_path_length is None:
184 | agent.desire_path_length = l3norm(agent.pos_global_frame, agent.goal_global_frame) - 0.5
185 | agent.desire_points_num = agent.desire_path_length / DT
186 | dif_x = goal - agent.pos_global_frame
187 | norm = int(l3norm(dif_x, [0, 0, 0]) * eps) / eps
188 | norm_dif_x = dif_x * agent.pref_speed / norm
189 | v_pref = np.array(norm_dif_x)
190 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
191 | v_pref[0] = 0.0
192 | v_pref[1] = 0.0
193 | v_pref[2] = 0.0
194 | agent.v_pref = v_pref
195 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
196 | return V_des
197 |
198 |
199 | if __name__ == "__main__":
200 | pass
201 |
--------------------------------------------------------------------------------
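The candidate velocities that intersect() above tests against the RVO constraints come from a golden-ratio spiral on the sphere. The following standalone sketch (sample_candidate_velocities is a hypothetical name, not the repo's API) reproduces just that sampling step so it can be inspected on its own:

import numpy as np
from math import sqrt, sin, cos, pi

def sample_candidate_velocities(pref_speed, min_speed=0.5, num_n=256):
    phi = (sqrt(5.0) - 1.0) / 2.0                 # golden-ratio conjugate
    candidates = []
    for rad in np.arange(min_speed, pref_speed + 0.03, pref_speed - min_speed):
        for n in range(1, num_n + 1):
            z = (2 * n - 1) / num_n - 1           # z stratified over (-1, 1)
            r = sqrt(1 - z * z)
            theta = 2 * pi * n * phi              # golden-angle increment around the z-axis
            candidates.append(np.array([rad * r * cos(theta), rad * r * sin(theta), rad * z]))
    return candidates

vs = sample_candidate_velocities(pref_speed=1.0)
print(len(vs), round(float(np.linalg.norm(vs[0])), 3))   # 512 samples spread over two speed shells

intersect() then keeps the sampled velocity closest to v_pref among those outside every RVO cone, or falls back to a time-to-collision penalty when none is feasible.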
/mamp/policies/sca/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/mamp/policies/sca/__init__.py
--------------------------------------------------------------------------------
/mamp/policies/sca/dubinsmaneuver2d.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2021.8.13
4 | @ Details: 2D dubins path planner
5 | @ reference: 2020 ICRA Minimal 3D Dubins Path with Bounded Curvature and Pitch Angle
6 | @ github: https://github.com/comrob/Dubins3D.jl
7 | """
8 |
9 |
10 | import math
11 | import numpy as np
12 | from mamp.util import mod2pi, pi_2_pi
13 | import matplotlib.pyplot as plt
14 |
15 |
16 | class DubinsManeuver(object):
17 | def __init__(self, qi, qf, r_min):
18 | self.qi = qi
19 | self.qf = qf
20 | self.r_min = r_min
21 | self.t = -1.0
22 | self.p = -1.0
23 | self.q = -1.0
24 | self.px = []
25 | self.py = []
26 | self.pyaw = []
27 | self.path = []
28 | self.mode = None
29 | self.length = -1.0
30 | self.sampling_size = None
31 |
32 |
33 | def LSL(alpha, beta, d):
34 | sa = math.sin(alpha)
35 | sb = math.sin(beta)
36 | ca = math.cos(alpha)
37 | cb = math.cos(beta)
38 | c_ab = math.cos(alpha - beta)
39 |
40 | tmp0 = d + sa - sb
41 |
42 | mode = ["L", "S", "L"]
43 | p_squared = 2 + (d * d) - (2 * c_ab) + (2 * d * (sa - sb))
44 | if p_squared < 0:
45 | return None, None, None, mode
46 | tmp1 = math.atan2((cb - ca), tmp0)
47 | t = mod2pi(-alpha + tmp1)
48 | p = math.sqrt(p_squared)
49 | q = mod2pi(beta - tmp1)
50 |
51 | return t, p, q, mode
52 |
53 |
54 | def RSR(alpha, beta, d):
55 | sa = math.sin(alpha)
56 | sb = math.sin(beta)
57 | ca = math.cos(alpha)
58 | cb = math.cos(beta)
59 | c_ab = math.cos(alpha - beta)
60 |
61 | tmp0 = d - sa + sb
62 | mode = ["R", "S", "R"]
63 | p_squared = 2 + (d * d) - (2 * c_ab) + (2 * d * (sb - sa))
64 | if p_squared < 0:
65 | return None, None, None, mode
66 | tmp1 = math.atan2((ca - cb), tmp0)
67 | t = mod2pi(alpha - tmp1)
68 | p = math.sqrt(p_squared)
69 | q = mod2pi(-beta + tmp1)
70 |
71 | return t, p, q, mode
72 |
73 |
74 | def LSR(alpha, beta, d):
75 | sa = math.sin(alpha)
76 | sb = math.sin(beta)
77 | ca = math.cos(alpha)
78 | cb = math.cos(beta)
79 | c_ab = math.cos(alpha - beta)
80 |
81 | p_squared = -2 + (d * d) + (2 * c_ab) + (2 * d * (sa + sb))
82 | mode = ["L", "S", "R"]
83 | if p_squared < 0:
84 | return None, None, None, mode
85 | p = math.sqrt(p_squared)
86 | tmp2 = math.atan2((-ca - cb), (d + sa + sb)) - math.atan2(-2.0, p)
87 | t = mod2pi(-alpha + tmp2)
88 | q = mod2pi(-mod2pi(beta) + tmp2)
89 |
90 | return t, p, q, mode
91 |
92 |
93 | def RSL(alpha, beta, d):
94 | sa = math.sin(alpha)
95 | sb = math.sin(beta)
96 | ca = math.cos(alpha)
97 | cb = math.cos(beta)
98 | c_ab = math.cos(alpha - beta)
99 |
100 | p_squared = (d * d) - 2 + (2 * c_ab) - (2 * d * (sa + sb))
101 | mode = ["R", "S", "L"]
102 | if p_squared < 0:
103 | return None, None, None, mode
104 | p = math.sqrt(p_squared)
105 | tmp2 = math.atan2((ca + cb), (d - sa - sb)) - math.atan2(2.0, p)
106 | t = mod2pi(alpha - tmp2)
107 | q = mod2pi(beta - tmp2)
108 |
109 | return t, p, q, mode
110 |
111 |
112 | def RLR(alpha, beta, d):
113 | sa = math.sin(alpha)
114 | sb = math.sin(beta)
115 | ca = math.cos(alpha)
116 | cb = math.cos(beta)
117 | c_ab = math.cos(alpha - beta)
118 |
119 | mode = ["R", "L", "R"]
120 | tmp_rlr = (6.0 - d * d + 2.0 * c_ab + 2.0 * d * (sa - sb)) / 8.0
121 | if abs(tmp_rlr) > 1.0:
122 | return None, None, None, mode
123 |
124 | p = mod2pi(2 * math.pi - math.acos(tmp_rlr))
125 | t = mod2pi(alpha - math.atan2(ca - cb, d - sa + sb) + mod2pi(p / 2.0))
126 | q = mod2pi(alpha - beta - t + mod2pi(p))
127 | return t, p, q, mode
128 |
129 |
130 | def LRL(alpha, beta, d):
131 | sa = math.sin(alpha)
132 | sb = math.sin(beta)
133 | ca = math.cos(alpha)
134 | cb = math.cos(beta)
135 | c_ab = math.cos(alpha - beta)
136 |
137 | mode = ["L", "R", "L"]
138 | tmp_lrl = (6. - d * d + 2 * c_ab + 2 * d * (- sa + sb)) / 8.
139 | if abs(tmp_lrl) > 1:
140 | return None, None, None, mode
141 | p = mod2pi(2 * math.pi - math.acos(tmp_lrl))
142 | t = mod2pi(-alpha - math.atan2(ca - cb, d + sa - sb) + p / 2.)
143 | q = mod2pi(mod2pi(beta) - alpha - t + mod2pi(p))
144 |
145 | return t, p, q, mode
146 |
147 |
148 | def dubins_path_planning_from_origin(ex, ey, syaw, eyaw, c):
149 |     # normalize
150 | dx = ex
151 | dy = ey
152 | D = math.sqrt(dx ** 2.0 + dy ** 2.0)
153 | d = D / c
154 |
155 | theta = mod2pi(math.atan2(dy, dx))
156 | alpha = mod2pi(syaw - theta)
157 | beta = mod2pi(eyaw - theta)
158 |
159 | planners = [LSL, RSR, LSR, RSL, RLR, LRL]
160 |
161 | bcost = float("inf")
162 | bt, bp, bq, bmode = None, None, None, None
163 |
164 | for planner in planners:
165 | t, p, q, mode = planner(alpha, beta, d)
166 | if t is None:
167 | continue
168 |
169 | cost = c*(abs(t) + abs(p) + abs(q))
170 | if bcost > cost:
171 | bt, bp, bq, bmode = t, p, q, mode
172 | bcost = cost
173 |
174 | px, py, pyaw = generate_course([bt, bp, bq], bmode, c)
175 |
176 | return px, py, pyaw, bmode, bcost, bt, bp, bq
177 |
178 |
179 | def dubins_path_planning(start_pos, end_pos, c):
180 | """
181 |     Dubins path planner
182 |
183 | input:start_pos, end_pos, c
184 | start_pos[0] x position of start point [m]
185 | start_pos[1] y position of start point [m]
186 | start_pos[2] yaw angle of start point [rad]
187 | end_pos[0] x position of end point [m]
188 | end_pos[1] y position of end point [m]
189 | end_pos[2] yaw angle of end point [rad]
190 | c radius [m]
191 |
192 | output: maneuver
193 | maneuver.t the first segment curve of dubins
194 | maneuver.p the second segment line of dubins
195 | maneuver.q the third segment curve of dubins
196 | maneuver.px x position sets [m]
197 | maneuver.py y position sets [m]
198 | maneuver.pyaw heading angle sets [rad]
199 | maneuver.length length of dubins
200 | maneuver.mode mode of dubins
201 | """
202 | maneuver = DubinsManeuver(start_pos, end_pos, c)
203 | sx, sy = start_pos[0], start_pos[1]
204 | ex, ey = end_pos[0], end_pos[1]
205 | syaw, eyaw = start_pos[2], end_pos[2]
206 |
207 | ex = ex - sx
208 | ey = ey - sy
209 |
210 | lpx, lpy, lpyaw, mode, clen, t, p, q = dubins_path_planning_from_origin(ex, ey, syaw, eyaw, c)
211 |
212 | px = [math.cos(-syaw) * x + math.sin(-syaw) * y + sx for x, y in zip(lpx, lpy)]
213 | py = [-math.sin(-syaw) * x + math.cos(-syaw) * y + sy for x, y in zip(lpx, lpy)]
214 | pyaw = [pi_2_pi(iyaw + syaw) for iyaw in lpyaw]
215 | maneuver.t, maneuver.p, maneuver.q, maneuver.mode = t, p, q, mode
216 | maneuver.px, maneuver.py, maneuver.pyaw, maneuver.length = px, py, pyaw, clen
217 |
218 | return maneuver
219 |
220 |
221 | def generate_course(length, mode, c):
222 | px = [0.0]
223 | py = [0.0]
224 | pyaw = [0.0]
225 |
226 | for m, l in zip(mode, length):
227 | pd = 0.0
228 | if m == "S":
229 | d = 1.0 / c
230 | else: # turning course
231 | d = np.deg2rad(6.0)
232 |
233 | while pd < abs(l - d):
234 | px.append(px[-1] + d * c * math.cos(pyaw[-1]))
235 | py.append(py[-1] + d * c * math.sin(pyaw[-1]))
236 |
237 | if m == "L": # left turn
238 | pyaw.append(pyaw[-1] + d)
239 | elif m == "S": # Straight
240 | pyaw.append(pyaw[-1])
241 | elif m == "R": # right turn
242 | pyaw.append(pyaw[-1] - d)
243 | pd += d
244 |
245 | d = l - pd
246 | px.append(px[-1] + d * c * math.cos(pyaw[-1]))
247 | py.append(py[-1] + d * c * math.sin(pyaw[-1]))
248 |
249 | if m == "L": # left turn
250 | pyaw.append(pyaw[-1] + d)
251 | elif m == "S": # Straight
252 | pyaw.append(pyaw[-1])
253 | elif m == "R": # right turn
254 | pyaw.append(pyaw[-1] - d)
255 | pd += d
256 |
257 | return px, py, pyaw
258 |
259 |
260 | def get_coordinates(maneuver, offset):
261 | noffset = offset / maneuver.r_min
262 | qi = [0., 0., maneuver.qi[2]]
263 |
264 | l1 = maneuver.t
265 | l2 = maneuver.p
266 |     q1 = get_position_in_segment(l1, qi, maneuver.mode[0])  # end of segment 1
267 |     q2 = get_position_in_segment(l2, q1, maneuver.mode[1])  # end of segment 2
268 |
269 | if noffset < l1:
270 | q = get_position_in_segment(noffset, qi, maneuver.mode[0])
271 | elif noffset < (l1 + l2):
272 | q = get_position_in_segment(noffset - l1, q1, maneuver.mode[1])
273 | else:
274 | q = get_position_in_segment(noffset - l1 - l2, q2, maneuver.mode[2])
275 |
276 | q[0] = q[0] * maneuver.r_min + qi[0]
277 | q[1] = q[1] * maneuver.r_min + qi[1]
278 | q[2] = mod2pi(q[2])
279 |
280 | return q
281 |
282 |
283 | def get_position_in_segment(offset, qi, mode):
284 | q = [0.0, 0.0, 0.0]
285 | if mode == 'L':
286 | q[0] = qi[0] + math.sin(qi[2] + offset) - math.sin(qi[2])
287 | q[1] = qi[1] - math.cos(qi[2] + offset) + math.cos(qi[2])
288 | q[2] = qi[2] + offset
289 | elif mode == 'R':
290 | q[0] = qi[0] - math.sin(qi[2] - offset) + math.sin(qi[2])
291 | q[1] = qi[1] + math.cos(qi[2] - offset) - math.cos(qi[2])
292 | q[2] = qi[2] - offset
293 | elif mode == 'S':
294 | q[0] = qi[0] + math.cos(qi[2]) * offset
295 | q[1] = qi[1] + math.sin(qi[2]) * offset
296 | q[2] = qi[2]
297 | return q
298 |
299 |
300 | def get_sampling_points(maneuver, sampling_size=0.1):
301 | points = []
302 | for offset in np.arange(0.0, maneuver.length+sampling_size, sampling_size):
303 | points.append(get_coordinates(maneuver, offset))
304 | return points
305 |
306 |
307 | def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"): # pragma: no cover
308 | """
309 | Plot arrow
310 | """
311 |
312 | if not isinstance(x, float):
313 | for (ix, iy, iyaw) in zip(x, y, yaw):
314 | plot_arrow(ix, iy, iyaw)
315 | else:
316 | plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
317 | fc=fc, ec=ec, head_width=width, head_length=width)
318 | plt.plot(x, y)
319 |
320 |
321 | if __name__ == '__main__':
322 | print("Dubins Path Planner Start!!")
323 |
324 | test_qi = [[0.0, 5.0, np.deg2rad(180.0)], [10.0, 5.0, np.deg2rad(180.0)]] # start_x, start_y, start_yaw
325 | test_qf = [[10.0, 5.0, np.deg2rad(180.0)], [0.0, 5.0, np.deg2rad(0.0)]] # end_x, end_y, end_yaw
326 |
327 | rmin = 1.5
328 |
329 | test_maneuver = dubins_path_planning(test_qi[0], test_qf[0], rmin)
330 | test_maneuver1 = dubins_path_planning(test_qi[1], test_qf[1], rmin)
331 |
332 | path = []
333 | for i in range(len(test_maneuver.px)):
334 | path.append([test_maneuver.px[i], test_maneuver.py[i], test_maneuver.pyaw[i]])
335 | print(len(path), test_maneuver.length, path)
336 |
337 | plt.plot(test_maneuver.px, test_maneuver.py, label="test_maneuver " + "".join(test_maneuver.mode))
338 | # plt.plot(test_maneuver1.px, test_maneuver1.py, label="test_maneuver1 " + "".join(test_maneuver1.mode))
339 |
340 |
341 | # plotting
342 | plot_arrow(test_qi[0][0], test_qi[0][1], test_qi[0][2])
343 | plot_arrow(test_qf[0][0], test_qf[0][1], test_qf[0][2])
344 | # plot_arrow(test_qi[1][0], test_qi[1][1], test_qi[1][2])
345 | # plot_arrow(test_qf[1][0], test_qf[1][1], test_qf[1][2])
346 |
347 | plt.legend()
348 | plt.grid(True)
349 | plt.axis("equal")
350 | plt.show()
351 |
--------------------------------------------------------------------------------
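A small usage sketch of the 2D planner above (assumptions: it is run from the repository root so that mamp is importable, and the start/goal poses are arbitrary example values): plan a maneuver and resample it at a fixed arc-length step with get_sampling_points, which returns (x, y, yaw) triples translated so that the start lies at the origin.

import numpy as np
from mamp.policies.sca.dubinsmaneuver2d import dubins_path_planning, get_sampling_points

maneuver = dubins_path_planning([0.0, 0.0, 0.0], [4.0, 4.0, np.deg2rad(90.0)], 1.5)
points = get_sampling_points(maneuver, sampling_size=0.1)    # one sample every 0.1 m of path length
print("".join(maneuver.mode), round(maneuver.length, 3), len(points))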
/mamp/policies/sca/dubinsmaneuver3d.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2021.8.13
4 | @ Details: 3D dubins path planner
5 | @ reference: 2020 ICRA Minimal 3D Dubins Path with Bounded Curvature and Pitch Angle
6 | @ github: https://github.com/comrob/Dubins3D.jl
7 | """
8 |
9 | import numpy as np
10 | import math
11 | import matplotlib.pyplot as plt
12 |
13 | from mamp.policies.sca.dubinsmaneuver2d import dubins_path_planning, get_coordinates, plot_arrow
14 |
15 |
16 | class DubinsManeuver3D(object):
17 | def __init__(self, qi, qf, Rmin, pitchlims):
18 | self.qi = qi
19 | self.qf = qf
20 | self.Rmin = Rmin
21 | self.pitchlims = pitchlims
22 | self.px = []
23 | self.py = []
24 | self.pz = []
25 | self.psi = []
26 | self.gamma = []
27 | self.maneuvers2d = []
28 | self.length = -1.0
29 | self.mode = None
30 | self.path = []
31 | self.sampling_size = 0.1
32 |
33 |
34 | def dubinsmaneuver3d(qi, qf, Rmin, pitchlims):
35 | """
36 |     Dubins3D path planner
37 |
38 | input:qi, qf, Rmin, pitchlims
39 | qi[0] x position of start point [m]
40 | qi[1] y position of start point [m]
41 | qi[2] z position of start point [m]
42 | qi[3] yaw heading angle of start point [rad]
43 | qi[4] pitch pitch angle of start point [rad]
44 | qf[0] x position of end point [m]
45 | qf[1] y position of end point [m]
46 | qf[2] z position of end point [m]
47 | qf[3] yaw heading angle of end point [rad]
48 | qf[4] pitch pitch angle of end point [rad]
49 | Rmin radius [m]
50 | pitchlims[0] min pitch angle [rad]
51 | pitchlims[1] max pitch angle [rad]
52 |
53 | output: maneuver
54 | maneuver.px x position sets [m]
55 | maneuver.py y position sets [m]
56 | maneuver.pz z position sets [m]
57 | maneuver.psi heading angle sets [rad]
58 | maneuver.gamma pitch angle sets [rad]
59 | maneuver.maneuvers2d horizontal and vertical dubins
60 | maneuver.length length of 3D dubins
61 | maneuver.mode mode of 3D dubins
62 | maneuver.path coordinates of 3D dubins
63 | """
64 | maneuver3d = DubinsManeuver3D(qi, qf, Rmin, pitchlims)
65 | # Delta Z (height)
66 | zi = qi[2]
67 | zf = qf[2]
68 | dz = zf - zi
69 |
70 | # Multiplication factor of Rmin in [1, 1000]
71 | a = 1.0
72 | b = 1.0
73 |
74 | fa = try_to_construct(maneuver3d, maneuver3d.Rmin * a)
75 | fb = try_to_construct(maneuver3d, maneuver3d.Rmin * b)
76 | while len(fb) < 2:
77 | b *= 2.0
78 | fb = try_to_construct(maneuver3d, maneuver3d.Rmin * b)
79 |
80 | if len(fa) > 0:
81 | maneuver3d.maneuvers2d = fa
82 | else:
83 | if len(fb) < 2:
84 | print("No maneuver exists")
85 |
86 | # Local optimization between horizontal and vertical radii
87 | step = 0.1
88 | while abs(step) > 1e-10:
89 | c = b + step
90 | if c < 1.0:
91 | c = 1.0
92 | fc = try_to_construct(maneuver3d, maneuver3d.Rmin * c)
93 | if len(fc) > 0:
94 | if fc[1].length < fb[1].length: # length
95 | b = c
96 | fb = fc
97 | step *= 2.
98 | continue
99 | step *= -0.1
100 |
101 | maneuver3d.maneuvers2d = fb
102 | maneuver3d.length = fb[1].length
103 | mode3d = ""
104 | mode = fb[0].mode + fb[1].mode
105 | mode3d = mode3d.join(mode)
106 | maneuver3d.mode = mode3d
107 | # draw_dubins2d(fb[0])
108 | # draw_dubins2d(fb[1])
109 | # print(mode3d)
110 |
111 | compute_sampling(maneuver3d, sampling_size=0.1) # get coordinate
112 |
113 | return maneuver3d
114 |
115 |
116 | def compute_sampling(maneuver3d, sampling_size=np.deg2rad(6.0)):
117 | maneuver_h, maneuver_v = maneuver3d.maneuvers2d
118 | if maneuver3d.length > 100:
119 | sampling_size = maneuver3d.length / 1000
120 | # Sample points on the final path
121 | maneuver3d.sampling_size = sampling_size
122 | qi = maneuver3d.qi
123 | for ran in np.arange(0, maneuver3d.length + sampling_size, sampling_size):
124 | offset = ran
125 | qSZ = get_coordinates(maneuver_v, offset)
126 | qXY = get_coordinates(maneuver_h, qSZ[0])
127 | maneuver3d.px.append(qXY[0] + qi[0])
128 | maneuver3d.py.append(qXY[1] + qi[1])
129 | maneuver3d.pz.append(qSZ[1] + qi[2])
130 | maneuver3d.psi.append(qXY[2])
131 | maneuver3d.gamma.append(qSZ[2])
132 | maneuver3d.path.append([qXY[0] + qi[0], qXY[1] + qi[1], qSZ[1] + qi[2], qXY[2], qSZ[2]])
133 |
134 |
135 | def try_to_construct(maneuver3d, horizontal_radius):
136 | qi2D = [maneuver3d.qi[0], maneuver3d.qi[1], maneuver3d.qi[3]] # x, y, yaw
137 | qf2D = [maneuver3d.qf[0], maneuver3d.qf[1], maneuver3d.qf[3]] # x, y, yaw
138 |
139 | maneuver_h = dubins_path_planning(qi2D, qf2D, horizontal_radius)
140 |
141 | # After finding a long enough 2D curve, calculate the Dubins path on SZ axis
142 | qi3D = [0.0, maneuver3d.qi[2], maneuver3d.qi[4]]
143 | qf3D = [maneuver_h.length, maneuver3d.qf[2], maneuver3d.qf[4]]
144 |
145 | Rmin = maneuver3d.Rmin
146 | vertical_curvature = math.sqrt(1.0 / Rmin ** 2 - 1.0 / horizontal_radius ** 2)
147 | if vertical_curvature < 1e-5:
148 | return []
149 | vertical_radius = 1.0 / vertical_curvature
150 | maneuver_v = dubins_path_planning(qi3D, qf3D, vertical_radius)
151 |
152 | case = ""
153 | case = case.join(maneuver_v.mode)
154 |     if case == "RLR" or case == "LRL":
155 | return []
156 | if case[0] == "R":
157 | if maneuver3d.qi[4] - maneuver_v.t < maneuver3d.pitchlims[0]:
158 | return []
159 | else:
160 | if maneuver3d.qi[4] + maneuver_v.t > maneuver3d.pitchlims[1]:
161 | return []
162 | return [maneuver_h, maneuver_v] # Final 3D path is formed by the two curves (maneuver_h, maneuver_v)
163 |
164 |
165 | def set_ax(ax):
166 | ax.set_aspect('auto')
167 | ax.set_xlabel('X(m)')
168 | ax.set_xlim(-22, 25)
169 | ax.set_ylabel('Y(m)')
170 | ax.set_ylim(-22, 25)
171 | ax.set_zlabel('Z(m)')
172 | ax.set_zlim(-5, 50)
173 |
174 |
175 | def draw_dubins3d(px_sets, py_sets, pz_sets):
176 | fig1 = plt.figure()
177 | ax = fig1.add_subplot(111, projection='3d')
178 | ax.plot3D(px_sets, py_sets, pz_sets, 'red')
179 | set_ax(ax)
180 | plt.show()
181 |
182 |
183 | def draw_dubins2d(maneuver2d):
184 | plt.plot(maneuver2d.px, maneuver2d.py, label="final course " + "".join(maneuver2d.mode))
185 | # plotting
186 | plot_arrow(maneuver2d.qi[0], maneuver2d.qi[1], maneuver2d.qi[2])
187 | plot_arrow(maneuver2d.qf[0], maneuver2d.qf[1], maneuver2d.qf[2])
188 | plt.legend()
189 | plt.grid(True)
190 | plt.axis("equal")
191 | plt.show()
192 |
193 |
194 | if __name__ == '__main__':
195 | print("3D-Dubins Planner Start!!")
196 | """
197 |     3D-Dubins path planner
198 |
199 | input:
200 | start_x : x position of start point [m]
201 | start_y : y position of start point [m]
202 | start_z : z position of start point [m]
203 | start_psi : heading angle of start point [rad]
204 | start_gamma : flight path angle of start point [rad]
205 |
206 | end_x : x position of end point [m]
207 | end_y : y position of end point [m]
208 | end_z : z position of end point [m]
209 | end_psi : heading angle of end point [rad]
210 | end_gamma : flight path angle of end point [rad]
211 |
212 | Rmin : minimum turning radius [m]
213 |
214 | output:
215 | px : x coordinates of path
216 | py : y coordinates of path
217 | pz : z coordinates of path
218 | ppsi : heading angle of path points
219 | pgamma: flight path angle of path points
220 | mode : type of curve
221 |
222 | """
223 | n = 4
224 | k = 45
225 | test_qi = [[0.0, 0.0, 3.0, np.deg2rad(-90), np.deg2rad(0.0)], [0.0, 0.0, 13.0, np.deg2rad(-90), np.deg2rad(0.0)]]
226 | test_qf = [[0.0, 0.0, 13.0, np.deg2rad(90), np.deg2rad(0.0)], [0.0, 0.0, 3.0, np.deg2rad(90), np.deg2rad(0.0)]]
227 | test_pitchlims = [np.deg2rad(-45.0), np.deg2rad(45.0)]
228 | test_Rmin = 1.5
229 |
230 |     '''Instance from the reference paper; the expected path length is 976.79.'''
231 | # test_qi = [-80.0, 10.0, 250.0, np.deg2rad(20.0), np.deg2rad(0.0)]
232 | # test_qf = [50.0, 70.0, 0.0, np.deg2rad(240.0), np.deg2rad(0.0)]
233 | # test_pitchlims = [np.deg2rad(-15.0), np.deg2rad(20.0)]
234 | # test_Rmin = 40
235 |
236 | try:
237 | maneuver = dubinsmaneuver3d(test_qi[0], test_qf[0], test_Rmin, test_pitchlims)
238 | maneuver1 = dubinsmaneuver3d(test_qi[1], test_qf[1], test_Rmin, test_pitchlims)
239 | print("Trajectory Type ---->", maneuver.mode, " and Length ----->", maneuver.length)
240 |
241 | print(len(maneuver.path), maneuver.path)
242 | print(len(maneuver1.path), maneuver1.path)
243 | # print(maneuver.path[75])
244 | draw_dubins3d(maneuver.px, maneuver.py, maneuver.pz)
245 | draw_dubins3d(maneuver1.px, maneuver1.py, maneuver1.pz)
246 |
247 | except:
248 | print("NOT POSSIBLE")
249 |
--------------------------------------------------------------------------------
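The decoupling in try_to_construct() above splits the minimum turning radius between the horizontal (XY) and vertical (SZ) Dubins problems through vertical_curvature = sqrt(1/Rmin**2 - 1/R_h**2), i.e. 1/Rmin**2 = 1/R_h**2 + 1/R_v**2. The standalone check below (example numbers only) shows how enlarging the horizontal radius frees curvature for the climb/descent plane:

from math import sqrt

Rmin = 1.5
for scale in (1.0, 2.0, 4.0):
    R_h = Rmin * scale                                # horizontal radius tried by the optimizer
    kappa_v = sqrt(1.0 / Rmin ** 2 - 1.0 / R_h ** 2)  # vertical curvature, as in try_to_construct()
    R_v = 1.0 / kappa_v if kappa_v > 1e-5 else float("inf")
    print(scale, round(R_v, 3))                       # scale 1.0 leaves no vertical curvature at all

This is why try_to_construct() rejects the case vertical_curvature < 1e-5 and why the outer search in dubinsmaneuver3d() keeps growing the horizontal multiplier until a vertical maneuver exists.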
/mamp/policies/sca/rvo3dDubinsPolicy.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: SCA for multi-agent motion planning
5 | @ reference: 2020 ICRA Minimal 3D Dubins Path with Bounded Curvature and Pitch Angle
6 | @ reference: Reciprocal Velocity Obstacles for Real-Time Multi-Agent Navigation
7 | @ github: https://github.com/MengGuo/RVO_Py_MAS
8 | """
9 | import time
10 | import numpy as np
11 | from math import sqrt, sin, cos, acos, pi
12 | from mamp.policies.sca import dubinsmaneuver3d
13 | from mamp.configs.config import eps, NEAR_GOAL_THRESHOLD
14 | from mamp.util import sqr, absSq, l3norm, is_parallel, is_intersect, satisfied_constraint
15 | from mamp.util import reached, cartesian2spherical
16 |
17 |
18 | class RVO3dDubinsPolicy(object):
19 | """ RVO3dDubinsPolicy """
20 |
21 | def __init__(self):
22 | self.now_goal = None
23 | self.update_now_goal_dist = 1.0
24 | self.type = "internal"
25 |
26 | def find_next_action(self, dict_comm, agent, kdTree):
27 | """
28 | Function: SCA compute suitable speed for agents
29 | """
30 | start_t = time.time()
31 | self.get_trajectory(agent) # update now_goal
32 | v_pref = compute_v_pref(agent)
33 | vA = agent.vel_global_frame
34 | if l3norm(vA, [0, 0, 0]) <= 1e-5:
35 | end_t = time.time()
36 | delta_t = end_t - start_t
37 | agent.total_time += delta_t
38 | vA_post = 0.3 * v_pref
39 | action = cartesian2spherical(agent, vA_post)
40 | theta = 0.0
41 | else:
42 | pA = agent.pos_global_frame
43 | RVO_BA_all = []
44 | agent_rad = agent.radius + 0.05
45 | computeNeighbors(agent, kdTree)
46 | # for obj in other_objects:
47 | for obj in agent.neighbors:
48 | obj = obj[0]
49 | pB = obj.pos_global_frame
50 | if obj.is_at_goal:
51 | transl_vB_vA = pA
52 | else:
53 | vB = obj.vel_global_frame
54 | transl_vB_vA = pA + 0.5 * (vB + vA) # use RVO
55 | obj_rad = obj.radius + 0.05
56 |
57 | RVO_BA = [transl_vB_vA, pA, pB, obj_rad + agent_rad]
58 | RVO_BA_all.append(RVO_BA)
59 | vA_post = intersect(v_pref, RVO_BA_all, agent)
60 | end_t = time.time()
61 | delta_t = end_t - start_t
62 | agent.total_time += delta_t
63 | action = cartesian2spherical(agent, vA_post)
64 | theta = acos(min(np.dot(vA, vA_post) / (np.linalg.norm(vA) * np.linalg.norm(vA_post)), 1.0))
65 |
66 | dist = round(l3norm(agent.pos_global_frame, agent.goal_global_frame), 5)
67 | if theta > round(agent.max_heading_change, 5):
68 |             print('-------------agent' + str(agent.id), len(agent.neighbors), theta, action[3], 'distance to goal:', dist)
69 | else:
70 |             print('agent' + str(agent.id), len(agent.neighbors), action[3], 'distance to goal:', dist)
71 | return action
72 |
73 | def get_trajectory(self, agent):
74 | if agent.path:
75 | if self.now_goal is None: # first
76 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
77 | dis = l3norm(agent.pos_global_frame, self.now_goal)
78 | dis_nowgoal_globalgoal = l3norm(self.now_goal, agent.goal_global_frame)
79 | dis_nowgoal_globalpos = l3norm(agent.pos_global_frame, agent.goal_global_frame)
80 |             if dis <= self.update_now_goal_dist * agent.radius:  # within the update distance of the current waypoint: advance to the next one
81 | if agent.path:
82 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
83 |             elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos:  # the current waypoint is no closer to the goal than the agent (already passed): advance
84 | if agent.path:
85 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
86 | else:
87 | self.now_goal = agent.goal_global_frame
88 |
89 |
90 | def compute_dubins(agent):
91 | qi = np.hstack((agent.pos_global_frame, agent.heading_global_frame))
92 | qf = np.hstack((agent.goal_global_frame, agent.goal_heading_frame))
93 | Rmin, pitchlims = agent.turning_radius, agent.pitchlims
94 | maneuver = dubinsmaneuver3d.dubinsmaneuver3d(qi, qf, Rmin, pitchlims)
95 | dubins_path = maneuver.path.copy()
96 | agent.dubins_sampling_size = maneuver.sampling_size
97 | desire_length = maneuver.length
98 | points_num = len(dubins_path)
99 | path = []
100 | for i in range(points_num):
101 | path.append((dubins_path.pop()))
102 | return path, desire_length, points_num
103 |
104 |
105 | def computeNeighbors(agent, kdTree):
106 | if agent.is_collision:
107 | return
108 |
109 | agent.neighbors.clear()
110 | rangeSq = agent.neighborDist ** 2
111 | # check obstacle neighbors
112 | kdTree.computeObstacleNeighbors(agent, rangeSq)
113 | # check other agents
114 | kdTree.computeAgentNeighbors(agent, rangeSq)
115 |
116 |
117 | def compute_without_suitV(agent, RVO_BA_all, unsuit_v):
118 | tc = []
119 | for RVO_BA in RVO_BA_all:
120 | p_0 = RVO_BA[0]
121 | pA = RVO_BA[1]
122 | pB = RVO_BA[2]
123 | combined_radius = RVO_BA[3]
124 | v_dif = np.array(unsuit_v + pA - p_0)
125 | pApB = pB - pA
126 | if is_intersect(pA, pB, combined_radius, v_dif) and satisfied_constraint(agent, unsuit_v):
127 | discr = sqr(np.dot(v_dif, pApB)) - absSq(v_dif) * (absSq(pApB) - sqr(combined_radius))
128 | tc_v = (np.dot(v_dif, pApB) - sqrt(discr)) / absSq(v_dif)
129 | if tc_v < 0:
130 | tc_v = 0.0
131 | tc.append(tc_v)
132 | if len(tc) == 0:
133 | tc = [0.0]
134 | return tc
135 |
136 |
137 | def compute_newV_is_suit(agent, RVO_BA_all, new_v):
138 | suit = True
139 | if len(RVO_BA_all) == 0:
140 | if not satisfied_constraint(agent, new_v):
141 | suit = False
142 | return suit
143 |
144 | for RVO_BA in RVO_BA_all:
145 | p_0 = RVO_BA[0]
146 | pA = RVO_BA[1]
147 | pB = RVO_BA[2]
148 | combined_radius = RVO_BA[3]
149 | v_dif = np.array(new_v + pA - p_0) # new_v-0.5*(vA+vB) or new_v-vB
150 | if is_intersect(pA, pB, combined_radius, v_dif) or not satisfied_constraint(agent, new_v):
151 | suit = False
152 | break
153 | return suit
154 |
155 |
156 | def intersect(v_pref, RVO_BA_all, agent):
157 | num_N = 256
158 | param_phi = (sqrt(5.0) - 1.0) / 2.0
159 | min_speed = 0.5
160 | suitable_V = []
161 | unsuitable_V = []
162 | for rad in np.arange(min_speed, agent.pref_speed + 0.03, agent.pref_speed - min_speed):
163 | for n in range(1, num_N + 1):
164 | z_n = (2 * n - 1) / num_N - 1
165 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * n * param_phi)
166 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * n * param_phi)
167 | new_v = np.array([rad * x_n, rad * y_n, rad * z_n])
168 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
169 | if suit:
170 | suitable_V.append(new_v)
171 | else:
172 | unsuitable_V.append(new_v)
173 | new_v = v_pref[:]
174 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
175 | if suit:
176 | suitable_V.append(new_v)
177 | else:
178 | unsuitable_V.append(new_v)
179 | # ----------------------
180 | if suitable_V:
181 | suitable_V.sort(key=lambda v: l3norm(v, v_pref)) # sort begin at minimum and end at maximum
182 | vA_post = suitable_V[0]
183 | else:
184 | # print('--------------------Suitable not found', 'agent', agent.id, len(suitable_V), len(unsuitable_V))
185 | tc_V = dict()
186 | for unsuit_v in unsuitable_V:
187 | unsuit_v = np.array(unsuit_v)
188 | tc_V[tuple(unsuit_v)] = 0
189 | tc = compute_without_suitV(agent, RVO_BA_all, unsuit_v)
190 | tc_V[tuple(unsuit_v)] = min(tc) + 1e-5
191 | WT = 0.2
192 | vA_post = min(unsuitable_V, key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
193 | vA_post = np.array([int(vA_post[0] * eps) / eps, int(vA_post[1] * eps) / eps, int(vA_post[2] * eps) / eps])
194 | return vA_post
195 |
196 |
197 | def update_dubins(agent):
198 | dis = l3norm(agent.pos_global_frame, agent.dubins_now_goal)
199 | sampling_size = agent.dubins_sampling_size
200 | if dis < sampling_size * 2:
201 | if agent.dubins_path:
202 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
203 | else:
204 | agent.dubins_now_goal = agent.goal_global_frame
205 |
206 |
207 | def dubins_path_node_pop(agent):
208 | if agent.dubins_path:
209 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
210 | if agent.dubins_path:
211 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
212 | if agent.dubins_path:
213 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
214 | if agent.dubins_path:
215 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
216 |
217 |
218 | def compute_v_pref(agent):
219 | """
220 |     is_parallel(vA, v_pref)  -- checks whether collision avoidance has pulled the agent off the Dubins trajectory
221 |     dis_goal <= k  -- within this distance of the goal, the path is assumed obstacle-free
222 |     dis < 6 * sampling_size  -- if avoidance has not taken the agent far from the original Dubins curve, keep tracking it
223 |     theta >= np.deg2rad(100)  -- stop re-planning once updated Dubins curves keep driving the agent away from the goal (a condensed sketch of this decision follows this file)
224 | """
225 | dis_goal = l3norm(agent.pos_global_frame, agent.goal_global_frame)
226 | k = 3.0 * agent.turning_radius
227 | if not agent.is_use_dubins: # first
228 | agent.is_use_dubins = True
229 | dubins_path, desire_length, points_num = compute_dubins(agent)
230 | agent.dubins_path = dubins_path
231 | dubins_path_node_pop(agent)
232 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
233 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
234 |     elif agent.is_back2start and dis_goal <= 1.5 * NEAR_GOAL_THRESHOLD:  # back near the start area: make the initial position and heading the new goal
235 | agent.is_back2start = False
236 | agent.goal_global_frame = agent.initial_pos[:3]
237 | agent.goal_heading_frame = agent.initial_heading
238 | # dif_x = agent.goal_global_frame - agent.pos_global_frame
239 | dubins_path, desire_length, points_num = compute_dubins(agent)
240 | agent.dubins_path = dubins_path
241 | dubins_path_node_pop(agent)
242 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
243 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
244 | else:
245 | update_dubins(agent)
246 | vA = np.array(agent.vel_global_frame)
247 | v_pref = agent.v_pref
248 | dis = l3norm(agent.pos_global_frame, agent.dubins_now_goal)
249 | max_size = round(6 * agent.dubins_sampling_size, 5)
250 | pApG = agent.goal_global_frame - agent.pos_global_frame
251 | theta = round(acos(min(np.dot(vA, pApG) / (np.linalg.norm(vA) * np.linalg.norm(pApG)), 1.0)), 5)
252 | deg100 = round(np.deg2rad(100), 5)
253 | min_dist_ob = round(sqrt(agent.neighbors[0][1]), 5) if agent.neighbors else round(agent.neighborDist)
254 | p0pA = agent.goal_pos[:3] - agent.initial_pos[:3]
255 | is_zAxis = abs(np.dot(p0pA, [1.0, 0.0, 0.0])) <= 1e-5 and abs(np.dot(p0pA, [0.0, 1.0, 0.0])) <= 1e-5
256 | condition_dist = (min_dist_ob >= 2.0 * agent.turning_radius) if is_zAxis else False
257 | if ((is_parallel(vA, v_pref) or dis_goal <= k) and dis < max_size) or (theta >= deg100) or condition_dist:
258 | # print('----------------------------------', 111, is_parallel(vA, v_pref), condition_dist)
259 | update_dubins(agent)
260 | if agent.dubins_path:
261 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
262 | else:
263 | dif_x = agent.goal_global_frame - agent.pos_global_frame
264 | else:
265 | # print('----------------------------------', 222, is_parallel(vA, v_pref), condition_dist)
266 | dubins_path, length, points_num = compute_dubins(agent)
267 | agent.dubins_path = dubins_path
268 | dubins_path_node_pop(agent)
269 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
270 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
271 |
272 | norm = l3norm(dif_x, [0, 0, 0])
273 | norm_dif_x = dif_x * agent.pref_speed / norm
274 | v_pref = np.array(norm_dif_x)
275 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
276 | v_pref[0] = 0.0
277 | v_pref[1] = 0.0
278 | v_pref[2] = 0.0
279 | agent.v_pref = v_pref
280 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
281 | return V_des
282 |
283 |
284 | if __name__ == "__main__":
285 | pass
286 |
--------------------------------------------------------------------------------
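The re-planning decision documented in compute_v_pref() above (keep tracking the current Dubins waypoint versus regenerating the curve) can be read as a single predicate. The sketch below is a condensed, standalone paraphrase of that condition: should_keep_tracking is a hypothetical helper, the cross-product test merely stands in for mamp.util.is_parallel (whose exact tolerance is not shown here), and the special z-axis case (condition_dist) is omitted.

import numpy as np

def should_keep_tracking(vA, v_pref, pos, dubins_goal, goal, sampling_size, turning_radius):
    # True: keep steering toward the current Dubins waypoint; False: re-plan the Dubins curve.
    on_curve = np.linalg.norm(dubins_goal - pos) < 6.0 * sampling_size       # avoidance has not pulled us far off the curve
    follows_curve = np.linalg.norm(np.cross(vA, v_pref)) < 1e-6              # stand-in for is_parallel(vA, v_pref)
    near_goal = np.linalg.norm(goal - pos) <= 3.0 * turning_radius           # close to the goal: assume obstacle-free
    to_goal = goal - pos
    cos_ang = np.dot(vA, to_goal) / (np.linalg.norm(vA) * np.linalg.norm(to_goal) + 1e-12)
    turned_away = np.arccos(np.clip(cos_ang, -1.0, 1.0)) >= np.deg2rad(100)  # stop re-planning away from the goal
    return ((follows_curve or near_goal) and on_curve) or turned_away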
/mamp/policies/sca/scaPolicy.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: SCA for multi-agent motion planning
5 | @ reference: 2020 ICRA Minimal 3D Dubins Path with Bounded Curvature and Pitch Angle
6 | @ reference: Reciprocal Velocity Obstacles for Real-Time Multi-Agent Navigation
7 | @ github: https://github.com/MengGuo/RVO_Py_MAS
8 | """
9 | import time
10 | import numpy as np
11 | from math import sqrt, sin, cos, acos, pi
12 | from mamp.policies.sca import dubinsmaneuver3d
13 | from mamp.configs.config import eps, NEAR_GOAL_THRESHOLD
14 | from mamp.util import sqr, absSq, l3norm, is_parallel, is_intersect, satisfied_constraint
15 | from mamp.util import get_phi, reached, cartesian2spherical
16 |
17 |
18 | class SCAPolicy(object):
19 | """ SCAPolicy """
20 |
21 | def __init__(self):
22 | self.now_goal = None
23 | self.update_now_goal_dist = 1.0
24 | self.type = "internal"
25 |
26 | def find_next_action(self, dict_comm, agent, kdTree):
27 | """
28 | Function: SCA compute suitable speed for agents
29 | """
30 | start_t = time.time()
31 | self.get_trajectory(agent) # update now_goal
32 | v_pref = compute_v_pref(agent)
33 | vA = agent.vel_global_frame
34 | if l3norm(vA, [0, 0, 0]) <= 1e-5:
35 | end_t = time.time()
36 | delta_t = end_t - start_t
37 | agent.total_time += delta_t
38 | vA_post = 0.3 * v_pref
39 | action = cartesian2spherical(agent, vA_post)
40 | theta = 0.0
41 | else:
42 | pA = agent.pos_global_frame
43 | RVO_BA_all = []
44 | agent_rad = agent.radius + 0.05
45 | computeNeighbors(agent, kdTree)
46 | # for obj in other_objects:
47 | for obj in agent.neighbors:
48 | obj = obj[0]
49 | pB = obj.pos_global_frame
50 | vB = obj.vel_global_frame
51 | # tc_B = 0.2
52 | # pB_pred = pB + vB * tc_B
53 | if obj.is_at_goal:
54 | transl_vB_vA = pA
55 | else:
56 | transl_vB_vA = pA + 0.5 * (vB + vA) # use RVO
57 | obj_rad = obj.radius + 0.05
58 |
59 | RVO_BA = [transl_vB_vA, pA, pB, obj_rad + agent_rad]
60 | RVO_BA_all.append(RVO_BA)
61 | vA_post = intersect(v_pref, RVO_BA_all, agent)
62 | end_t = time.time()
63 | delta_t = end_t - start_t
64 | agent.total_time += delta_t
65 | action = cartesian2spherical(agent, vA_post)
66 | theta = acos(min(np.dot(vA, vA_post) / (np.linalg.norm(vA) * np.linalg.norm(vA_post)), 1.0))
67 |
68 | dist = round(l3norm(agent.pos_global_frame, agent.goal_global_frame), 5)
69 | if theta > round(agent.max_heading_change, 5):
70 |             print('-------------agent' + str(agent.id), len(agent.neighbors), theta, action[3], 'distance to goal:', dist)
71 | else:
72 |             print('agent' + str(agent.id), len(agent.neighbors), action[3], 'distance to goal:', dist)
73 | return action
74 |
75 | def get_trajectory(self, agent):
76 | if agent.path:
77 | if self.now_goal is None: # first
78 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
79 | dis = l3norm(agent.pos_global_frame, self.now_goal)
80 | dis_nowgoal_globalgoal = l3norm(self.now_goal, agent.goal_global_frame)
81 | dis_nowgoal_globalpos = l3norm(agent.pos_global_frame, agent.goal_global_frame)
82 |             if dis <= self.update_now_goal_dist * agent.radius:  # within the update distance of the current waypoint: advance to the next one
83 | if agent.path:
84 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
85 |             elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos:  # the current waypoint is no closer to the goal than the agent (already passed): advance
86 | if agent.path:
87 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
88 | else:
89 | self.now_goal = agent.goal_global_frame
90 |
91 |
92 | def compute_dubins(agent):
93 | qi = np.hstack((agent.pos_global_frame, agent.heading_global_frame))
94 | qf = np.hstack((agent.goal_global_frame, agent.goal_heading_frame))
95 | Rmin, pitchlims = agent.turning_radius, agent.pitchlims
96 | maneuver = dubinsmaneuver3d.dubinsmaneuver3d(qi, qf, Rmin, pitchlims)
97 | dubins_path = maneuver.path.copy()
98 | agent.dubins_sampling_size = maneuver.sampling_size
99 | desire_length = maneuver.length
100 | points_num = len(dubins_path)
101 | path = []
102 | for i in range(points_num):
103 | path.append((dubins_path.pop()))
104 | return path, desire_length, points_num
105 |
106 |
107 | def computeNeighbors(agent, kdTree):
108 | if agent.is_collision:
109 | return
110 |
111 | agent.neighbors.clear()
112 | rangeSq = agent.neighborDist ** 2
113 | # check obstacle neighbors
114 | kdTree.computeObstacleNeighbors(agent, rangeSq)
115 | # check other agents
116 | kdTree.computeAgentNeighbors(agent, rangeSq)
117 |
118 |
119 | def shunted_strategy(agent, v_list, threshold=5e-2):
120 | """
121 |     @ Details: the generalized passing-on-the-right rule (see the standalone sketch after this file)
122 | """
123 | opt_v = [v_list[0]]
124 | opti_flag = True
125 | vA = agent.vel_global_frame
126 | i = 1
127 | while opti_flag:
128 |         if abs(l3norm(v_list[0], vA) - l3norm(v_list[i], vA)) < threshold:  # a threshold of 5e-2 works best in practice; other values perform worse
129 | opt_v.append(v_list[i])
130 | i += 1
131 | if i == len(v_list):
132 | break
133 | else:
134 | break
135 | # print('-----------------------------------', len(v_list), len(opt_v))
136 | vA_phi_min = min(opt_v, key=lambda v: get_phi(v))
137 | vA_phi_max = max(opt_v, key=lambda v: get_phi(v))
138 | opt_v_best = [vA_phi_min, vA_phi_max]
139 | phi_min = get_phi(vA_phi_min)
140 | phi_max = get_phi(vA_phi_max)
141 | if abs(phi_max - phi_min) <= pi:
142 | vA_opti = min(opt_v_best, key=lambda v: get_phi(v))
143 | else:
144 | vA_opti = max(opt_v_best, key=lambda v: get_phi(v))
145 | return vA_opti
146 |
147 |
148 | def compute_without_suitV(agent, RVO_BA_all, unsuit_v):
149 | tc = []
150 | for RVO_BA in RVO_BA_all:
151 | p_0 = RVO_BA[0]
152 | pA = RVO_BA[1]
153 | pB = RVO_BA[2]
154 | combined_radius = RVO_BA[3]
155 | v_dif = np.array(unsuit_v + pA - p_0)
156 | pApB = pB - pA
157 | if is_intersect(pA, pB, combined_radius, v_dif) and satisfied_constraint(agent, unsuit_v):
158 | discr = sqr(np.dot(v_dif, pApB)) - absSq(v_dif) * (absSq(pApB) - sqr(combined_radius))
159 | tc_v = (np.dot(v_dif, pApB) - sqrt(discr)) / absSq(v_dif)
160 | if tc_v < 0:
161 | tc_v = 0.0
162 | tc.append(tc_v)
163 | if len(tc) == 0:
164 | tc = [0.0]
165 | return tc
166 |
167 |
168 | def compute_newV_is_suit(agent, RVO_BA_all, new_v):
169 | suit = True
170 | if len(RVO_BA_all) == 0:
171 | if not satisfied_constraint(agent, new_v):
172 | suit = False
173 | return suit
174 |
175 | for RVO_BA in RVO_BA_all:
176 | p_0 = RVO_BA[0]
177 | pA = RVO_BA[1]
178 | pB = RVO_BA[2]
179 | combined_radius = RVO_BA[3]
180 | v_dif = np.array(new_v + pA - p_0) # new_v-0.5*(vA+vB) or new_v-vB
181 | if is_intersect(pA, pB, combined_radius, v_dif) or not satisfied_constraint(agent, new_v):
182 | suit = False
183 | break
184 | return suit
185 |
186 |
187 | def intersect(v_pref, RVO_BA_all, agent):
188 | p0pA = agent.goal_pos[:3] - agent.initial_pos[:3]
189 | is_zAxis = abs(np.dot(p0pA, [1.0, 0.0, 0.0])) <= 1e-5 and abs(np.dot(p0pA, [0.0, 1.0, 0.0])) <= 1e-5
190 | num_N = 256 if not is_zAxis else 128
191 | param_phi = (sqrt(5.0) - 1.0) / 2.0
192 | min_speed = 0.5
193 | suitable_V = []
194 | unsuitable_V = []
195 | for rad in np.arange(min_speed, agent.pref_speed + 0.03, agent.pref_speed - min_speed):
196 | for n in range(1, num_N + 1):
197 | z_n = (2 * n - 1) / num_N - 1
198 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * n * param_phi)
199 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * n * param_phi)
200 | new_v = np.array([rad * x_n, rad * y_n, rad * z_n])
201 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
202 | if suit:
203 | suitable_V.append(new_v)
204 | else:
205 | unsuitable_V.append(new_v)
206 | new_v = v_pref[:]
207 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
208 | if suit:
209 | suitable_V.append(new_v)
210 | else:
211 | unsuitable_V.append(new_v)
212 |
213 | # therehold_rate = len(agent.neighbors) / agent.maxNeighbors
214 | threshold = 3e-2
215 | # if len(agent.neighbors) <= 14:
216 | # threshold = 3e-2
217 | # ----------------------
218 | if suitable_V:
219 | suitable_V.sort(key=lambda v: l3norm(v, v_pref)) # sort begin at minimum and end at maximum
220 | if len(suitable_V) > 1:
221 | vA_post = shunted_strategy(agent, suitable_V, threshold=threshold)
222 | else:
223 | vA_post = suitable_V[0]
224 | else:
225 | print('--------------------Suitable not found', 'agent', agent.id, len(suitable_V), len(unsuitable_V))
226 | tc_V = dict()
227 | for unsuit_v in unsuitable_V:
228 | unsuit_v = np.array(unsuit_v)
229 | tc_V[tuple(unsuit_v)] = 0
230 | tc = compute_without_suitV(agent, RVO_BA_all, unsuit_v)
231 | tc_V[tuple(unsuit_v)] = min(tc) + 1e-5
232 | WT = 0.2
233 | # vA_post = min(unsuitable_V, key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
234 | unsuitable_V.sort(key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
235 | if len(unsuitable_V) > 1:
236 | vA_post = shunted_strategy(agent, unsuitable_V, threshold=5e-2)
237 | else:
238 | vA_post = unsuitable_V[0]
239 | vA_post = np.array([int(vA_post[0] * eps) / eps, int(vA_post[1] * eps) / eps, int(vA_post[2] * eps) / eps])
240 | return vA_post
241 |
242 |
243 | def update_dubins(agent):
244 | dis = l3norm(agent.pos_global_frame, agent.dubins_now_goal)
245 | sampling_size = agent.dubins_sampling_size
246 | if dis < sampling_size * 2:
247 | if agent.dubins_path:
248 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
249 | else:
250 | agent.dubins_now_goal = agent.goal_global_frame
251 |
252 |
253 | def dubins_path_node_pop(agent):
254 | if agent.dubins_path:
255 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
256 | if agent.dubins_path:
257 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
258 | if agent.dubins_path:
259 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
260 | if agent.dubins_path:
261 | agent.dubins_last_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
262 |
263 |
264 | def compute_v_pref(agent):
265 | """
266 |     is_parallel(vA, v_pref)  -- checks whether collision avoidance has pulled the agent off the Dubins trajectory
267 |     dis_goal <= k  -- within this distance of the goal, the path is assumed obstacle-free
268 |     dis < 6 * sampling_size  -- if avoidance has not taken the agent far from the original Dubins curve, keep tracking it
269 |     theta >= np.deg2rad(100)  -- stop re-planning once updated Dubins curves keep driving the agent away from the goal
270 | """
271 | dis_goal = l3norm(agent.pos_global_frame, agent.goal_global_frame)
272 | k = 3.0 * agent.turning_radius
273 | if not agent.is_use_dubins: # first
274 | agent.is_use_dubins = True
275 | dubins_path, desire_length, points_num = compute_dubins(agent)
276 | agent.dubins_path = dubins_path
277 | dubins_path_node_pop(agent)
278 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
279 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
280 |     elif agent.is_back2start and dis_goal <= 1.5 * NEAR_GOAL_THRESHOLD:  # back near the start area: make the initial position and heading the new goal
281 | agent.is_back2start = False
282 | agent.goal_global_frame = agent.initial_pos[:3]
283 | agent.goal_heading_frame = agent.initial_heading
284 | # dif_x = agent.goal_global_frame - agent.pos_global_frame
285 | dubins_path, desire_length, points_num = compute_dubins(agent)
286 | agent.dubins_path = dubins_path
287 | dubins_path_node_pop(agent)
288 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
289 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
290 | else:
291 | update_dubins(agent)
292 | vA = np.array(agent.vel_global_frame)
293 | v_pref = agent.v_pref
294 | dis = l3norm(agent.pos_global_frame, agent.dubins_now_goal)
295 | max_size = round(6 * agent.dubins_sampling_size, 5)
296 | pApG = agent.goal_global_frame - agent.pos_global_frame
297 | theta = round(acos(min(np.dot(vA, pApG) / (np.linalg.norm(vA) * np.linalg.norm(pApG)), 1.0)), 5)
298 | deg100 = round(np.deg2rad(100), 5)
299 | min_dist_ob = round(sqrt(agent.neighbors[0][1]), 5) if agent.neighbors else round(agent.neighborDist)
300 | p0pA = agent.goal_pos[:3] - agent.initial_pos[:3]
301 | is_zAxis = abs(np.dot(p0pA, [1.0, 0.0, 0.0])) <= 1e-5 and abs(np.dot(p0pA, [0.0, 1.0, 0.0])) <= 1e-5
302 | condition_dist = (min_dist_ob >= 2.0 * agent.turning_radius) if is_zAxis else False
303 | # dist_10_seq = False
304 | # if len(agent.travelled_traj_node) == agent.num_node:
305 | # dis1 = l3norm(agent.travelled_traj_node[0], agent.goal_global_frame)
306 | # # dis2 = l3norm(agent.travelled_traj_node[1], agent.goal_global_frame)
307 | # # dis3 = l3norm(agent.travelled_traj_node[2], agent.goal_global_frame)
308 | # # dis4 = l3norm(agent.travelled_traj_node[3], agent.goal_global_frame)
309 | # # dis5 = l3norm(agent.travelled_traj_node[4], agent.goal_global_frame)
310 | #
311 | # dis = l3norm(agent.travelled_traj_node[-1], agent.goal_global_frame)
312 | # dist_10_seq = dis1 < dis
313 | if ((is_parallel(vA, v_pref) or dis_goal <= k) and dis < max_size) or (theta >= deg100) or condition_dist:
314 | update_dubins(agent)
315 | if agent.dubins_path:
316 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
317 | # print('--------------', 111, is_parallel(vA, v_pref), condition_dist)
318 | else:
319 | dif_x = agent.goal_global_frame - agent.pos_global_frame
320 | # print('--------------', 222, is_parallel(vA, v_pref), condition_dist)
321 | else:
322 | # print('----------------------------------', 333, is_parallel(vA, v_pref), condition_dist)
323 | dubins_path, length, points_num = compute_dubins(agent)
324 | agent.dubins_path = dubins_path
325 | dubins_path_node_pop(agent)
326 | agent.dubins_now_goal = np.array(agent.dubins_path.pop()[:3], dtype='float64')
327 | dif_x = agent.dubins_now_goal - agent.pos_global_frame
328 |
329 | norm = l3norm(dif_x, [0, 0, 0])
330 | norm_dif_x = dif_x * agent.pref_speed / norm
331 | v_pref = np.array(norm_dif_x)
332 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
333 | v_pref[0] = 0.0
334 | v_pref[1] = 0.0
335 | v_pref[2] = 0.0
336 | agent.v_pref = v_pref
337 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
338 | return V_des
339 |
340 |
341 | if __name__ == "__main__":
342 | pass
343 |
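344 | # Illustrative check (added example under the __main__ guard above, not part of the
345 | # original module): shows how dubins_path_node_pop() consumes a freshly sampled path
346 | # from its tail via pop(). The stand-in object only carries the attributes it touches.
347 | from types import SimpleNamespace
348 | demo = SimpleNamespace(dubins_path=[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0],
349 | [3.0, 0.0, 0.0], [4.0, 0.0, 0.0]], dubins_last_goal=None)
350 | dubins_path_node_pop(demo)
351 | print(demo.dubins_last_goal, len(demo.dubins_path))  # -> [1. 0. 0.] 1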
--------------------------------------------------------------------------------
/mamp/policies/srvo3dPolicy.py:
--------------------------------------------------------------------------------
1 | """
2 | @ Author: Gang Xu
3 | @ Date: 2022.04.16
4 | @ Details: SRVO3D (shunted RVO in 3D) for multi-agent motion planning
5 | @ reference: Reciprocal Velocity Obstacles for Real-Time Multi-Agent Navigation
6 | @ github: https://github.com/MengGuo/RVO_Py_MAS
7 | """
8 | import time
9 | import numpy as np
10 | from math import sqrt, sin, cos, acos, pi
11 | from mamp.configs.config import eps, DT
12 | from mamp.util import sqr, absSq, l3norm, is_intersect, satisfied_constraint, get_phi, reached, cartesian2spherical
13 |
14 |
15 | class SRVO3DPolicy(object):
16 | """ SRVO3DPolicy """
17 |
18 | def __init__(self):
19 | self.now_goal = None
20 | self.update_now_goal_dist = 1.0
21 | self.type = "internal"
22 |
23 | def find_next_action(self, dict_comm, agent, kdTree):
24 | """
25 | Function: RVO3D compute suitable speed for agents
26 | """
27 | start_t = time.time()
28 | self.get_trajectory(agent) # update now_goal
29 | v_pref = compute_v_pref(self.now_goal, agent)
30 | vA = agent.vel_global_frame
31 | if l3norm(vA, [0, 0, 0]) <= 1e-5:
32 | vA_post = 0.3 * v_pref
33 | action = cartesian2spherical(agent, vA_post)
34 | end_t = time.time()
35 | delta_t = end_t - start_t
36 | agent.total_time += delta_t
37 | theta = 0.0
38 | else:
39 | pA = agent.pos_global_frame
40 | RVO_BA_all = []
41 | agent_rad = agent.radius + 0.05
42 | computeNeighbors(agent, kdTree)
43 |
44 | # for obj in other_objects:
45 | for obj in agent.neighbors:
46 | obj = obj[0]
47 | pB = obj.pos_global_frame
48 | if obj.is_at_goal:
49 | transl_vB_vA = pA
50 | else:
51 | vB = obj.vel_global_frame
52 | transl_vB_vA = pA + 0.5 * (vB + vA) # use RVO
53 | obj_rad = obj.radius + 0.05
54 |
55 | RVO_BA = [transl_vB_vA, pA, pB, obj_rad + agent_rad]
56 | RVO_BA_all.append(RVO_BA)
57 | vA_post = intersect(v_pref, RVO_BA_all, agent)
58 | action = cartesian2spherical(agent, vA_post)
59 | end_t = time.time()
60 | delta_t = end_t - start_t
61 | agent.total_time += delta_t
62 | theta = acos(min(np.dot(vA, action[:3]) / (np.linalg.norm(vA) * np.linalg.norm(action[:3])), 1.0))
63 |
64 | dist = round(l3norm(agent.pos_global_frame, agent.goal_global_frame), 5)
65 | if theta > agent.max_heading_change:
66 | print('-------------agent' + str(agent.id), len(agent.neighbors), theta, action[3], 'distance to goal:', dist)
67 | else:
68 | print('agent' + str(agent.id), len(agent.neighbors), action[3], 'distance to goal:', dist)
69 | return action
70 |
71 | def get_trajectory(self, agent):
72 | if agent.path:
73 | if self.now_goal is None: # first
74 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
75 | dis = l3norm(agent.pos_global_frame, self.now_goal)
76 | dis_nowgoal_globalgoal = l3norm(self.now_goal, agent.goal_global_frame)
77 | dis_nowgoal_globalpos = l3norm(agent.pos_global_frame, agent.goal_global_frame)
78 | if dis <= self.update_now_goal_dist * agent.radius:  # close enough: advance to the next waypoint
79 | if agent.path:
80 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
81 | elif dis_nowgoal_globalgoal >= dis_nowgoal_globalpos:  # the waypoint is no closer to the goal than the agent, so advance past it
82 | if agent.path:
83 | self.now_goal = np.array(agent.path.pop(), dtype='float64')
84 | else:
85 | self.now_goal = agent.goal_global_frame
86 |
87 |
88 | def computeNeighbors(agent, kdTree):
89 | if agent.is_collision:
90 | return
91 |
92 | agent.neighbors.clear()
93 | rangeSq = agent.neighborDist ** 2
94 | # check obstacle neighbors
95 | kdTree.computeObstacleNeighbors(agent, rangeSq)
96 | # check other agents
97 | kdTree.computeAgentNeighbors(agent, rangeSq)
98 |
99 |
100 | def shunted_strategy(agent, v_list):
101 | """
102 | @details: the generalized passing on the right
103 | """
104 | opt_v = [v_list[0]]
105 | opti_flag = True
106 | vA = agent.vel_global_frame
107 | i = 1
108 | while opti_flag:
109 | diff = abs(l3norm(v_list[0], vA) - l3norm(v_list[i], vA))
110 | if diff < 1e-1:
111 | opt_v.append(v_list[i])
112 | i += 1
113 | if i == len(v_list):
114 | break
115 | else:
116 | break
117 | vA_phi_min = min(opt_v, key=lambda v: get_phi(v))
118 | vA_phi_max = max(opt_v, key=lambda v: get_phi(v))
119 | opt_v_best = [vA_phi_min, vA_phi_max]
120 | phi_min = get_phi(vA_phi_min)
121 | phi_max = get_phi(vA_phi_max)
122 | if abs(phi_max - phi_min) <= pi:
123 | vA_opti = min(opt_v_best, key=lambda v: get_phi(v))
124 | else:
125 | vA_opti = max(opt_v_best, key=lambda v: get_phi(v))
126 | return vA_opti
127 |
128 |
129 | def compute_without_suitV(agent, RVO_BA_all, unsuit_v):
130 | tc = []
131 | for RVO_BA in RVO_BA_all:
132 | p_0 = RVO_BA[0]
133 | pA = RVO_BA[1]
134 | pB = RVO_BA[2]
135 | combined_radius = RVO_BA[3]
136 | v_dif = np.array(unsuit_v + pA - p_0)
137 | pApB = pB - pA
138 | if is_intersect(pA, pB, combined_radius, v_dif) and satisfied_constraint(agent, unsuit_v):
139 | discr = sqr(np.dot(v_dif, pApB)) - absSq(v_dif) * (absSq(pApB) - sqr(combined_radius))
140 | tc_v = (np.dot(v_dif, pApB) - sqrt(discr)) / absSq(v_dif)
141 | if tc_v < 0:
142 | tc_v = 0.0
143 | tc.append(tc_v)
144 | if len(tc) == 0:
145 | tc = [0.0]
146 | return tc
147 |
148 |
149 | def compute_newV_is_suit(agent, RVO_BA_all, new_v):
150 | suit = True
151 | if len(RVO_BA_all) == 0:
152 | if not satisfied_constraint(agent, new_v):
153 | suit = False
154 | return suit
155 |
156 | for RVO_BA in RVO_BA_all:
157 | p_0 = RVO_BA[0]
158 | pA = RVO_BA[1]
159 | pB = RVO_BA[2]
160 | combined_radius = RVO_BA[3]
161 | v_dif = np.array(new_v + pA - p_0) # new_v-0.5*(vA+vB) or new_v-vB
162 | if is_intersect(pA, pB, combined_radius, v_dif) or not satisfied_constraint(agent, new_v):
163 | suit = False
164 | break
165 | return suit
166 |
167 |
168 | def compute_v_pref(goal, agent):
169 | if agent.desire_path_length is None:
170 | agent.desire_path_length = l3norm(agent.pos_global_frame, agent.goal_global_frame) - 0.5
171 | agent.desire_points_num = agent.desire_path_length / DT
172 | dif_x = goal - agent.pos_global_frame
173 | norm = int(l3norm(dif_x, [0, 0, 0]) * eps) / eps
174 | norm_dif_x = dif_x * agent.pref_speed / norm
175 | v_pref = np.array(norm_dif_x)
176 | if reached(agent.goal_global_frame, agent.pos_global_frame, bound=0.2):
177 | v_pref[0] = 0.0
178 | v_pref[1] = 0.0
179 | v_pref[2] = 0.0
180 | agent.v_pref = v_pref
181 | V_des = np.array([int(v_pref[0] * eps) / eps, int(v_pref[1] * eps) / eps, int(v_pref[2] * eps) / eps])
182 | return V_des
183 |
184 |
185 | def intersect(v_pref, RVO_BA_all, agent):
186 | num_N = 256
187 | param_phi = (sqrt(5.0) - 1.0) / 2.0
188 | min_speed = 0.5
189 | suitable_V = []
190 | unsuitable_V = []
191 | for rad in np.arange(min_speed, agent.pref_speed + 0.03, agent.pref_speed - min_speed):  # sampled speeds {min_speed, pref_speed}; the +0.03 guarantees pref_speed is included
192 | for n in range(1, num_N + 1):
193 | z_n = (2 * n - 1) / num_N - 1
194 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * n * param_phi)
195 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * n * param_phi)
196 | new_v = np.array([rad * x_n, rad * y_n, rad * z_n])
197 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
198 | if suit:
199 | suitable_V.append(new_v)
200 | else:
201 | unsuitable_V.append(new_v)
202 | new_v = v_pref[:]
203 | suit = compute_newV_is_suit(agent, RVO_BA_all, new_v)
204 | if suit:
205 | suitable_V.append(new_v)
206 | else:
207 | unsuitable_V.append(new_v)
208 | # ----------------------
209 | if suitable_V:
210 | suitable_V.sort(key=lambda v: l3norm(v, v_pref)) # sort begin at minimum and end at maximum
211 | if len(suitable_V) > 1:
212 | vA_post = shunted_strategy(agent, suitable_V)
213 | else:
214 | vA_post = suitable_V[0]
215 | else:
216 | print('--------------------Suitable not found', 'agent', agent.id, len(suitable_V), len(unsuitable_V))
217 | tc_V = dict()
218 | for unsuit_v in unsuitable_V:
219 | unsuit_v = np.array(unsuit_v)
220 | tc_V[tuple(unsuit_v)] = 0
221 | tc = compute_without_suitV(agent, RVO_BA_all, unsuit_v)
222 | tc_V[tuple(unsuit_v)] = min(tc) + 1e-5
223 | WT = 0.2  # weight of the time-to-collision penalty in the fallback cost
224 | # vA_post = min(unsuitable_V, key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
225 | unsuitable_V.sort(key=lambda v: ((WT / tc_V[tuple(v)]) + l3norm(v, v_pref)))
226 | if len(unsuitable_V) > 1:
227 | vA_post = shunted_strategy(agent, unsuitable_V)
228 | else:
229 | vA_post = unsuitable_V[0]
230 | vA_post = np.array([int(vA_post[0] * eps) / eps, int(vA_post[1] * eps) / eps, int(vA_post[2] * eps) / eps])
231 | return vA_post
232 |
233 |
234 | if __name__ == "__main__":
235 | pass
236 |
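237 | # Illustrative smoke test (added example under the __main__ guard above, not part of
238 | # the original module): runs intersect() with no neighbours using a stand-in agent;
239 | # only the attributes read by the functions in this file are assumed.
240 | from types import SimpleNamespace
241 | demo_agent = SimpleNamespace(id=0,
242 | pos_global_frame=np.array([0.0, 0.0, 10.0]),
243 | vel_global_frame=np.array([1.0, 0.0, 0.0]),
244 | pref_speed=1.0, timeStep=0.1,
245 | max_heading_change=np.deg2rad(45))
246 | v_pref = np.array([1.0, 0.0, 0.0])
247 | print('velocity with no neighbours:', intersect(v_pref, [], demo_agent))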
--------------------------------------------------------------------------------
/mamp/read_map.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from mamp.agents.obstacle import Obstacle
3 |
4 |
5 | class Voxels(object):
6 | def __init__(self, data, dims, translate, scale, axis_order):
7 | self.data = data
8 | self.dims = dims
9 | self.translate = translate
10 | self.scale = scale
11 | assert (axis_order in ('xzy', 'xyz'))
12 | self.axis_order = axis_order
13 |
14 |
15 | def read_as_3d_array(fp, fix_coords=True):
16 | dims, translate, scale = read_header(fp)
17 | raw_data = np.frombuffer(fp.read(), dtype=np.uint8)
18 | values, counts = raw_data[::2], raw_data[1::2]
19 | data = np.repeat(values, counts).astype(bool)  # use built-in bool; np.bool is removed in recent NumPy
20 | data = data.reshape(dims)
21 | if fix_coords:
22 | # xzy to xyz (TODO: confirm this is the correct axis transform)
23 | data = np.transpose(data, (0, 2, 1))
24 | axis_order = 'xyz'
25 | else:
26 | axis_order = 'xzy'
27 | return Voxels(data, dims, translate, scale, axis_order)
28 |
29 |
30 | def read_header(fp):
31 | # Read binvox head file
32 | line = fp.readline().strip()
33 | if not line.startswith(b'#binvox'):
34 | raise IOError('Not a binvox file')
35 | dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
36 | translate = list(map(float, fp.readline().strip().split(b' ')[1:]))
37 | scale = list(map(float, fp.readline().strip().split(b' ')[1:]))[0]
38 | line = fp.readline()
39 | return dims, translate, scale
40 |
41 |
42 | def read_obstacle(center, environ, obs_path):
43 | obstacles = []
44 | if environ == "exp3":
45 | resolution = 0.1
46 | bias = [-13.5, -13.5, -1.4]
47 | # path = "./map.binvox"
48 | else:
49 | return obstacles
50 |
51 | with open(obs_path, 'rb') as f:
52 | model = read_as_3d_array(f)
53 | print('size(l, w, h):', model.dims, 'translate matrix:', model.translate, 'scale factor:', model.scale)
54 | count = 0
55 | floor_count = 0
56 | tree_count = 0
57 | for x in range(model.dims[0]):
58 | for y in range(model.dims[2]):
59 | for z in range(model.dims[1]):
60 | if model.data[x][y][z]:
61 | position = [(y + model.translate[1]) * resolution + bias[0] + center[0],
62 | (x + model.translate[0]) * resolution + bias[1] + center[1],
63 | z * resolution + bias[2]]
64 | if position[2] > -1:
65 | if tree_count == 10:  # down-sample: keep one of every 11 above-ground voxels as an obstacle
66 | Obs = Obstacle(pos=position, shape_dict={'shape': "sphere", 'feature': 0.2},
67 | id=count)
68 | obstacles.append(Obs)
69 | count += 1
70 | tree_count = 0
71 | # print(count, ":", position)
72 | else:
73 | tree_count += 1
74 | else:
75 | if floor_count == 1000:  # down-sample the ground heavily: keep one of every 1001 ground voxels
76 | Obs = Obstacle(pos=position, shape_dict={'shape': "sphere", 'feature': 0.2},
77 | id=count)
78 | obstacles.append(Obs)
79 | count += 1
80 | # print(count, ":", position)
81 | floor_count = 0
82 | else:
83 | floor_count += 1
84 | print("obstacle_num:", count)
85 | return obstacles
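86 |
87 |
88 | if __name__ == "__main__":
89 | # Illustrative usage (added example): the relative path below is an assumption that
90 | # mirrors the commented-out call in run_example/run_sca.py; adjust it to your layout.
91 | obs = read_obstacle(center=(35, 30), environ="exp3", obs_path='../visualization/map/map.binvox')
92 | print(len(obs), 'obstacles loaded')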
--------------------------------------------------------------------------------
/mamp/util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from math import sqrt, asin, acos, atan2, pi, floor
3 | eps = 10 ** 5 # Keep 5 decimal.
4 |
5 |
6 | def satisfied_constraint(agent, vCand):
7 | vA = agent.vel_global_frame
8 | next_pA = agent.pos_global_frame + agent.timeStep * vCand # Constraints of z-plane.
9 | # if next_pA[2] < 0.0:
10 | # print('---------------------------------- z-Axis < 0.0 is true', next_pA[2])
11 | costheta = np.dot(vA, vCand) / (np.linalg.norm(vA) * np.linalg.norm(vCand))
12 | if costheta > 1.0:
13 | costheta = 1.0
14 | elif costheta < -1.0:
15 | costheta = -1.0
16 | theta = acos(costheta) # Rotational constraints.
17 | if theta <= agent.max_heading_change and next_pA[2] >= 0.0:
18 | return True
19 | else:
20 | return False
21 |
22 |
23 | def reached(p1, p2, bound=0.5):
24 | if l3norm(p1, p2) < bound:
25 | return True
26 | else:
27 | return False
28 |
29 |
30 | def is_intersect(pA, pB, combined_radius, v_dif):
31 | pAB = pB - pA
32 | dist_pAB = np.linalg.norm(pAB)
33 | if dist_pAB <= combined_radius:
34 | dist_pAB = combined_radius
35 | theta_pABBound = asin(combined_radius / dist_pAB)
36 | theta_pABvCand = acos(np.dot(pAB, v_dif) / (dist_pAB * np.linalg.norm(v_dif)))
37 | if theta_pABBound <= theta_pABvCand: # No intersecting or tangent.
38 |
39 | return False
40 | else:
41 | return True
42 |
43 |
44 | def cartesian2spherical(agent, vA_post):
45 | speed = l3norm(vA_post, [0, 0, 0])
46 | if speed < 0.001:
47 | alpha = 0.0
48 | beta = 0.0
49 | gamma = 0.0
50 | else:
51 | alpha = atan2(vA_post[1], vA_post[0]) - agent.heading_global_frame[0]
52 | beta = atan2(vA_post[2], sqrt(pow(vA_post[0], 2) + pow(vA_post[1], 2))) - agent.heading_global_frame[1]
53 | gamma = 0.0
54 | action = [vA_post[0], vA_post[1], vA_post[2], speed, alpha, beta, gamma]
55 | return action
56 |
57 |
58 | def det3order(a, b, c):
59 | a1 = a[0]*b[1]*c[2]
60 | a2 = a[1]*b[2]*c[0]
61 | a3 = a[2]*b[0]*c[1]
62 | b1 = a[2]*b[1]*c[0]
63 | b2 = a[1]*b[0]*c[2]
64 | b3 = a[0]*b[2]*c[1]
65 | return a1 + a2 + a3 - b1 - b2 - b3
66 |
67 |
68 | def length_vec(vec):
69 | return np.linalg.norm(vec)
70 |
71 |
72 | def normalize(vec):
73 | return np.array(vec) / np.linalg.norm(vec)
74 |
75 |
76 | def cross(v1, v2):
77 | return np.array([v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]])
78 |
79 |
80 | def takeSecond(elem):
81 | return elem[1]
82 |
83 |
84 | def sqr(a):
85 | return a ** 2
86 |
87 |
88 | def absSq(vec):
89 | return np.dot(vec, vec)
90 |
91 |
92 | def l2norm(x, y):
93 | return round(sqrt(l2normsq(x, y)), 5)
94 |
95 |
96 | def l2normsq(x, y):
97 | return round((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2, 5)
98 |
99 |
100 | def l3normsq(x, y):
101 | return round((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2, 5)
102 |
103 |
104 | def l3norm(p1, p2):
105 | """ Compute Euclidean distance for 3D """
106 | return round(sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2), 5)
107 |
108 |
109 | def pi_2_pi(angle): # to -pi - pi
110 | return (angle + pi) % (2 * pi) - pi
111 |
112 |
113 | def mod2pi(theta): # to 0 - 2*pi
114 | return theta - 2.0 * pi * floor(theta / 2.0 / pi)
115 |
116 |
117 | def leftOf(a, b, c):
118 | return det(a - c, b - a)
119 |
120 |
121 | def det(p, q):
122 | return p[0] * q[1] - p[1] * q[0]
123 |
124 |
125 | def is_parallel(vec1, vec2):
126 | """ Whether two three-dimensional vectors are parallel """
127 | assert vec1.shape == vec2.shape, r'Input parameter shape must be the same'
128 | norm_vec1 = np.linalg.norm(vec1)
129 | norm_vec2 = np.linalg.norm(vec2)
130 | if norm_vec1 <= 1e-5 or norm_vec2 <= 1e-5:  # check before dividing to avoid a zero norm
131 | return True
132 | vec1_normalized = vec1 / norm_vec1
133 | vec2_normalized = vec2 / norm_vec2
134 | if round(1.0 - abs(np.dot(vec1_normalized, vec2_normalized)), 5) < 3e-3:
135 | return True
136 | else:
137 | return False
138 |
139 |
140 | def distance(p1, p2):
141 | """ Compute Euclidean distance for 3D """
142 | return round(sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2) + 1e-5, 5)
143 |
144 |
145 | def get_phi(vec):  # Azimuth of a vector projected onto the XY plane, in [0, 2*pi).
146 | if vec[1] >= 0:
147 | phi = atan2(vec[1], vec[0])
148 | else:
149 | phi = 2 * pi + atan2(vec[1], vec[0])
150 | return int(phi * eps) / eps
151 |
152 |
153 |
154 |
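155 | if __name__ == "__main__":
156 | # Minimal illustrative checks of the angle helpers above (added example);
157 | # the printed values are approximate.
158 | print(pi_2_pi(3.5 * pi))  # wraps to [-pi, pi): about -1.5708
159 | print(mod2pi(-0.5 * pi))  # wraps to [0, 2*pi): about 4.71239
160 | print(get_phi([1.0, -1.0, 0.0]))  # azimuth in [0, 2*pi): about 5.49778
161 | print(is_parallel(np.array([1.0, 0.0, 0.0]), np.array([-2.0, 0.0, 0.0])))  # True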
--------------------------------------------------------------------------------
/run_example/run_orca.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import json
4 | import math
5 | import random
6 | import pandas as pd
7 | import numpy as np
8 | from mamp.agents.agent import Agent
9 | from mamp.agents.obstacle import Obstacle
10 | from mamp.policies.orca3dPolicy import ORCA3DPolicy
11 | from mamp.configs.config import DT
12 | from mamp.envs.mampenv import MACAEnv
13 | from mamp.util import mod2pi
14 |
15 |
16 | def set_circle_pos(agent_num):
17 | center = (0.0, 0.0)
18 | rad = 18.0
19 | k = 0
20 | agent_origin_pos = []
21 | agent_pos = []
22 | agent_goal = []
23 | for j in range(agent_num):
24 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
25 | center[1] + round(rad * np.sin(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
26 | 10.0,
27 | round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0, 0])
28 | agent_origin_pos.append((agent_pos[j][0], agent_pos[j][1]))
29 |
30 | for j in range(agent_num):
31 | agent_goal.append(agent_pos[(j + int(agent_num / 2)) % agent_num][:3]
32 | + [round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0.0, 0.0])
33 | return agent_pos, agent_goal, agent_origin_pos
34 |
35 |
36 | def set_sphere(num_N):
37 | agent_origin_pos = []
38 | agent_pos = []
39 | agent_goal = []
40 | rad = 25.0
41 | z_value = 30.0
42 | param_phi = (math.sqrt(5.0) - 1.0) / 2.0
43 | for n in range(1, num_N + 1):
44 | z_n = (2 * n - 1) / num_N - 1
45 | x_n = math.sqrt(1 - z_n ** 2) * math.cos(2 * math.pi * n * param_phi)
46 | y_n = math.sqrt(1 - z_n ** 2) * math.sin(2 * math.pi * n * param_phi)
47 | pos = np.array([rad * x_n, rad * y_n, rad * z_n, 0.0, 0.0, 0.0])
48 | agent_pos.append(pos)
49 | agent_goal.append(-pos)
50 | for i in range(len(agent_pos)):
51 | agent_pos[i][2] = agent_pos[i][2] + z_value
52 | agent_goal[i][2] = agent_goal[i][2] + z_value
53 |
54 | return agent_pos, agent_goal, agent_origin_pos
55 |
56 |
57 | def set_random_pos(agent_num):
58 | agent_origin_pos = []
59 | agent_pos = []
60 | agent_goal = []
61 | z_value = 30.0
62 | r = 15.0
63 | for n in range(agent_num):
64 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
65 | agent_pos.append(pos)
66 | for n in range(agent_num):
67 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
68 | agent_goal.append(pos)
69 |
70 | for i in range(len(agent_pos)):
71 | agent_pos[i][2] = agent_pos[i][2] + z_value
72 | agent_goal[i][2] = agent_goal[i][2] + z_value
73 | return agent_pos, agent_goal, agent_origin_pos
74 |
75 |
76 | def build_agents():
77 | # build agents
78 | drone_num = 100
79 | init_vel = [0.0, 0.0, 0.0]
80 | radius = 0.5
81 | pref_speed = 1.0
82 | pos, goal, DroneOriginPos = set_circle_pos(drone_num)
83 | # pos, goal, DroneOriginPos = set_sphere(drone_num)
84 | # pos, goal, DroneOriginPos = set_random_pos(drone_num)
85 | agents = []
86 | for i in range(len(pos)):
87 | agents.append(Agent(start_pos=pos[i], goal_pos=goal[i],
88 | vel=init_vel, radius=radius,
89 | pref_speed=pref_speed, policy=ORCA3DPolicy,
90 | id=i, dt=DT))
91 | return agents
92 |
93 |
94 | def build_obstacles():
95 | ob_list = [[0.0, 0.0, 10.0]]
96 | obstacles = []
97 | # obstacles.append(Obstacle(pos=ob_list[0], shape_dict={'shape': "cube", 'feature': (2.0, 2.0, 2.0)}, id=0))
98 | return obstacles
99 |
100 |
101 | if __name__ == "__main__":
102 | total_time = 10000
103 | step = 0
104 |
105 | # Build agents and obstacles.
106 | agents = build_agents()
107 | obstacles = build_obstacles()
108 |
109 | env = MACAEnv()
110 | env.set_agents(agents, obstacles=obstacles)
111 |
112 | # Test for ORCA.
113 | agents_num = len(agents)
114 | cost_time = 0.0
115 | start_time = time.time()
116 | while step * DT < total_time:
117 | print(step, '')
118 |
119 | actions = {}
120 | which_agents_done = env.step(actions)
121 |
122 | # Is arrived.
123 | if which_agents_done:
124 | print("All agents finished!", step)
125 | end_time = time.time()
126 | cost_time = end_time - start_time
127 | print('cost time', cost_time)
128 | break
129 | step += 1
130 |
131 | log_save_dir = os.path.dirname(os.path.realpath(__file__)) + '/../visualization/orca3d/log/'
132 | os.makedirs(log_save_dir, exist_ok=True)
133 | # trajectory
134 | writer = pd.ExcelWriter(log_save_dir + '/trajs.xlsx')
135 | for a in agents:
136 | a.history_info.to_excel(writer, sheet_name='agent' + str(a.id))
137 | writer.save()
138 |
139 | # scenario information
140 | info_dict_to_visualize = {
141 | 'all_agent_info': [],
142 | 'all_obstacle': [],
143 | 'all_compute_time': 0.0,
144 | 'all_straight_distance': 0.0,
145 | 'all_distance': 0.0,
146 | 'successful_num': 0,
147 | 'all_desire_step_num': 0,
148 | 'all_step_num': 0,
149 | 'SuccessRate': 0.0,
150 | 'ExtraTime': 0.0,
151 | 'ExtraDistance': 0.0,
152 | 'AverageSpeed': 0.0,
153 | 'AverageCost': 0.0
154 | }
155 | all_straight_dist = 0.0
156 | all_agent_total_time = 0.0
157 | all_agent_total_dist = 0.0
158 | num_of_success = 0
159 | all_desire_step_num = 0
160 | all_step_num = 0
161 |
162 | SuccessRate = 0.0
163 | ExtraTime = 0.0
164 | ExtraDistance = 0.0
165 | AverageSpeed = 0.0
166 | AverageCost = 0.0
167 |
168 | for agent in agents:
169 | agent_info_dict = {'id': agent.id, 'gp': agent.group, 'radius': agent.radius,
170 | 'goal_pos': agent.goal_global_frame.tolist()}
171 | info_dict_to_visualize['all_agent_info'].append(agent_info_dict)
172 | if not agent.is_collision and not agent.is_out_of_max_time:
173 | num_of_success += 1
174 | all_agent_total_time += agent.total_time
175 | all_straight_dist += agent.straight_path_length
176 | all_agent_total_dist += agent.total_dist
177 | all_desire_step_num += agent.desire_steps
178 | all_step_num += agent.step_num
179 |
180 | info_dict_to_visualize['all_compute_time'] = all_agent_total_time
181 | info_dict_to_visualize['all_straight_distance'] = all_straight_dist
182 | info_dict_to_visualize['all_distance'] = all_agent_total_dist
183 | info_dict_to_visualize['successful_num'] = num_of_success
184 | info_dict_to_visualize['all_desire_step_num'] = all_desire_step_num
185 | info_dict_to_visualize['all_step_num'] = all_step_num
186 |
187 | info_dict_to_visualize['SuccessRate'] = num_of_success / agents_num
188 | info_dict_to_visualize['ExtraTime'] = ((all_step_num - all_desire_step_num) * DT) / num_of_success
189 | info_dict_to_visualize['ExtraDistance'] = (all_agent_total_dist - all_straight_dist) / num_of_success
190 | info_dict_to_visualize['AverageSpeed'] = all_agent_total_dist/all_step_num/DT
191 | info_dict_to_visualize['AverageCost'] = 1000*all_agent_total_time / all_step_num
192 |
193 | for obstacle in obstacles:
194 | obstacle_info_dict = {'position': obstacle.pos, 'shape': obstacle.shape, 'feature': obstacle.feature}
195 | info_dict_to_visualize['all_obstacle'].append(obstacle_info_dict)
196 |
197 | info_str = json.dumps(info_dict_to_visualize, indent=4)
198 | with open(log_save_dir + '/env_cfg.json', 'w') as json_file:
199 | json_file.write(info_str)
200 | json_file.close()
201 | step = 0
202 |
203 |
--------------------------------------------------------------------------------
/run_example/run_rvo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import json
4 | import math
5 | import random
6 | import pandas as pd
7 | import numpy as np
8 | from mamp.agents.agent import Agent
9 | from mamp.agents.obstacle import Obstacle
10 | from mamp.policies.rvo3dPolicy import RVO3DPolicy
11 | from mamp.configs.config import DT
12 | from mamp.envs.mampenv import MACAEnv
13 | from mamp.util import mod2pi
14 |
15 |
16 | def set_circle_pos(agent_num):
17 | center = (0.0, 0.0)
18 | rad = 18.0
19 | k = 0
20 | agent_origin_pos = []
21 | agent_pos = []
22 | agent_goal = []
23 | for j in range(agent_num):
24 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
25 | center[1] + round(rad * np.sin(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
26 | 10.0,
27 | round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0, 0])
28 | agent_origin_pos.append((agent_pos[j][0], agent_pos[j][1]))
29 |
30 | for j in range(agent_num):
31 | agent_goal.append(agent_pos[(j + int(agent_num / 2)) % agent_num][:3]
32 | + [round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0.0, 0.0])
33 | return agent_pos, agent_goal, agent_origin_pos
34 |
35 |
36 | def set_sphere(num_N):
37 | agent_origin_pos = []
38 | agent_pos = []
39 | agent_goal = []
40 | rad = 25.0
41 | z_value = 30.0
42 | param_phi = (math.sqrt(5.0) - 1.0) / 2.0
43 | for n in range(1, num_N + 1):
44 | z_n = (2 * n - 1) / num_N - 1
45 | x_n = math.sqrt(1 - z_n ** 2) * math.cos(2 * math.pi * n * param_phi)
46 | y_n = math.sqrt(1 - z_n ** 2) * math.sin(2 * math.pi * n * param_phi)
47 | pos = np.array([rad * x_n, rad * y_n, rad * z_n, 0.0, 0.0, 0.0])
48 | agent_pos.append(pos)
49 | agent_goal.append(-pos)
50 | for i in range(len(agent_pos)):
51 | agent_pos[i][2] = agent_pos[i][2] + z_value
52 | agent_goal[i][2] = agent_goal[i][2] + z_value
53 |
54 | return agent_pos, agent_goal, agent_origin_pos
55 |
56 |
57 | def set_random_pos(agent_num):
58 | agent_origin_pos = []
59 | agent_pos = []
60 | agent_goal = []
61 | z_value = 30.0
62 | r = 15.0
63 | for n in range(agent_num):
64 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
65 | agent_pos.append(pos)
66 | for n in range(agent_num):
67 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
68 | agent_goal.append(pos)
69 |
70 | for i in range(len(agent_pos)):
71 | agent_pos[i][2] = agent_pos[i][2] + z_value
72 | agent_goal[i][2] = agent_goal[i][2] + z_value
73 | return agent_pos, agent_goal, agent_origin_pos
74 |
75 |
76 | def build_agents():
77 | # build agents
78 | drone_num = 100
79 | init_vel = [0.0, 0.0, 0.0]
80 | radius = 0.5
81 | pref_speed = 1.0
82 | pos, goal, DroneOriginPos = set_circle_pos(drone_num)
83 | # pos, goal, DroneOriginPos = set_sphere(drone_num)
84 | # pos, goal, DroneOriginPos = set_random_pos(drone_num)
85 | agents = []
86 | for i in range(drone_num):
87 | agents.append(Agent(start_pos=pos[i], goal_pos=goal[i],
88 | vel=init_vel, radius=radius,
89 | pref_speed=pref_speed, policy=RVO3DPolicy,
90 | id=i, dt=DT))
91 | return agents
92 |
93 |
94 | def build_obstacles():
95 | ob_list = [[0.0, 0.0, 10.0]]
96 | obstacles = []
97 | # obstacles.append(Obstacle(pos=ob_list[0], shape_dict={'shape': "cube", 'feature': (2.0, 2.0, 2.0)}, id=0))
98 | return obstacles
99 |
100 |
101 | if __name__ == "__main__":
102 | total_time = 10000
103 | step = 0
104 |
105 | # Build agents and obstacles.
106 | agents = build_agents()
107 | obstacles = build_obstacles()
108 |
109 | env = MACAEnv()
110 | env.set_agents(agents, obstacles=obstacles)
111 |
112 | # Test for RVO3D.
113 | agents_num = len(agents)
114 | cost_time = 0.0
115 | start_time = time.time()
116 | while step * DT < total_time:
117 | print(step, '')
118 |
119 | actions = {}
120 | which_agents_done = env.step(actions)
121 |
122 | # Is arrived.
123 | if which_agents_done:
124 | print("All agents finished!", step)
125 | end_time = time.time()
126 | cost_time = end_time - start_time
127 | print('cost time', cost_time)
128 | break
129 | step += 1
130 |
131 | log_save_dir = os.path.dirname(os.path.realpath(__file__)) + '/../visualization/rvo3d/log/'
132 | os.makedirs(log_save_dir, exist_ok=True)
133 | # trajectory
134 | writer = pd.ExcelWriter(log_save_dir + '/trajs.xlsx')
135 | for a in agents:
136 | a.history_info.to_excel(writer, sheet_name='agent' + str(a.id))
137 | writer.save()
138 |
139 | # scenario information
140 | info_dict_to_visualize = {
141 | 'all_agent_info': [],
142 | 'all_obstacle': [],
143 | 'all_compute_time': 0.0,
144 | 'all_straight_distance': 0.0,
145 | 'all_distance': 0.0,
146 | 'successful_num': 0,
147 | 'all_desire_step_num': 0,
148 | 'all_step_num': 0,
149 | 'SuccessRate': 0.0,
150 | 'ExtraTime': 0.0,
151 | 'ExtraDistance': 0.0,
152 | 'AverageSpeed': 0.0,
153 | 'AverageCost': 0.0
154 | }
155 | all_straight_dist = 0.0
156 | all_agent_total_time = 0.0
157 | all_agent_total_dist = 0.0
158 | num_of_success = 0
159 | all_desire_step_num = 0
160 | all_step_num = 0
161 |
162 | SuccessRate = 0.0
163 | ExtraTime = 0.0
164 | ExtraDistance = 0.0
165 | AverageSpeed = 0.0
166 | AverageCost = 0.0
167 |
168 | for agent in agents:
169 | agent_info_dict = {'id': agent.id, 'gp': agent.group, 'radius': agent.radius,
170 | 'goal_pos': agent.goal_global_frame.tolist()}
171 | info_dict_to_visualize['all_agent_info'].append(agent_info_dict)
172 | if not agent.is_collision and not agent.is_out_of_max_time:
173 | num_of_success += 1
174 | all_agent_total_time += agent.total_time
175 | all_straight_dist += agent.straight_path_length
176 | all_agent_total_dist += agent.total_dist
177 | all_desire_step_num += agent.desire_steps
178 | all_step_num += agent.step_num
179 |
180 | info_dict_to_visualize['all_compute_time'] = all_agent_total_time
181 | info_dict_to_visualize['all_straight_distance'] = all_straight_dist
182 | info_dict_to_visualize['all_distance'] = all_agent_total_dist
183 | info_dict_to_visualize['successful_num'] = num_of_success
184 | info_dict_to_visualize['all_desire_step_num'] = all_desire_step_num
185 | info_dict_to_visualize['all_step_num'] = all_step_num
186 |
187 | info_dict_to_visualize['SuccessRate'] = num_of_success / agents_num
188 | info_dict_to_visualize['ExtraTime'] = ((all_step_num - all_desire_step_num) * DT) / num_of_success
189 | info_dict_to_visualize['ExtraDistance'] = (all_agent_total_dist - all_straight_dist) / num_of_success
190 | info_dict_to_visualize['AverageSpeed'] = all_agent_total_dist/all_step_num/DT
191 | info_dict_to_visualize['AverageCost'] = 1000*all_agent_total_time / all_step_num
192 |
193 | for obstacle in obstacles:
194 | obstacle_info_dict = {'position': obstacle.pos, 'shape': obstacle.shape, 'feature': obstacle.feature}
195 | info_dict_to_visualize['all_obstacle'].append(obstacle_info_dict)
196 |
197 | info_str = json.dumps(info_dict_to_visualize, indent=4)
198 | with open(log_save_dir + '/env_cfg.json', 'w') as json_file:
199 | json_file.write(info_str)
200 | json_file.close()
201 | step = 0
202 |
--------------------------------------------------------------------------------
/run_example/run_rvo3dDubins.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import json
4 | import math
5 | import random
6 | import pandas as pd
7 | import numpy as np
8 | from mamp.agents.agent import Agent
9 | from mamp.agents.obstacle import Obstacle
10 | from mamp.policies.sca.rvo3dDubinsPolicy import RVO3dDubinsPolicy
11 | from mamp.configs.config import DT
12 | from mamp.envs.mampenv import MACAEnv
13 | from mamp.util import mod2pi
14 |
15 |
16 | def set_circle_pos(agent_num):
17 | center = (0.0, 0.0)
18 | rad = 10.0
19 | k = 0
20 | agent_origin_pos = []
21 | agent_pos = []
22 | agent_goal = []
23 | for j in range(agent_num):
24 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
25 | center[1] + round(rad * np.sin(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
26 | 10.0,
27 | round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0, 0])
28 | agent_origin_pos.append((agent_pos[j][0], agent_pos[j][1]))
29 |
30 | for j in range(agent_num):
31 | agent_goal.append(agent_pos[(j + int(agent_num / 2)) % agent_num])
32 |
33 | return agent_pos, agent_goal, agent_origin_pos
34 |
35 |
36 | def set_random_pos(agent_num):
37 | agent_origin_pos = []
38 | agent_pos = []
39 | agent_goal = []
40 | z_value = 30.0
41 | r = 25
42 | for n in range(agent_num):
43 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r),
44 | random.uniform(0.0, 2*np.pi), 0.0, 0.0])
45 | agent_pos.append(pos)
46 | for n in range(agent_num):
47 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r),
48 | random.uniform(0.0, 2*np.pi), 0.0, 0.0])
49 | agent_goal.append(pos)
50 |
51 | for i in range(len(agent_pos)):
52 | agent_pos[i][2] = agent_pos[i][2] + z_value
53 | agent_goal[i][2] = agent_goal[i][2] + z_value
54 | return agent_pos, agent_goal, agent_origin_pos
55 |
56 |
57 | def build_agents():
58 | drone_num = 14
59 | init_vel = [0.0, 0.0, 0.0]
60 | radius = 0.5
61 | pref_speed = 1.0
62 | pos, goal, DroneOriginPos = set_circle_pos(drone_num)
63 | # pos, goal, DroneOriginPos = set_random_pos(drone_num)
64 | agents = []
65 | for i in range(len(pos)):
66 | agents.append(Agent(start_pos=pos[i], goal_pos=goal[i],
67 | vel=init_vel, radius=radius,
68 | pref_speed=pref_speed, policy=RVO3dDubinsPolicy,
69 | id=i, dt=DT))
70 | return agents
71 |
72 |
73 | def build_obstacles():
74 | """
75 | experiment for take-off and landing
76 | 1) leave obstacles empty to test inter-agent avoidance only.
77 | 2) fill obstacles from ob_list to add static obstacles.
78 | """
79 | rad = 4.0
80 | center = (0.0, 0.0)
81 | obs_num = 8
82 | z_plane = 5.0
83 | ob_list = []
84 | for j in range(obs_num):
85 | ob_list.append([center[0] + round(rad * np.cos(2 * j * np.pi / obs_num), 2),
86 | center[1] + round(rad * np.sin(2 * j * np.pi / obs_num), 2),
87 | z_plane])
88 | obstacles = []
89 | # for i in range(len(ob_list)):
90 | # obstacles.append(Obstacle(pos=ob_list[i], shape_dict={'shape': "sphere", 'feature': 1.0}, id=i))
91 | return obstacles
92 |
93 |
94 | if __name__ == "__main__":
95 | total_time = 10000
96 | step = 0
97 |
98 | # Build agents and obstacles.
99 | agents = build_agents()
100 | agents_num = len(agents)
101 | obstacles = build_obstacles()
102 |
103 | env = MACAEnv()
104 | env.set_agents(agents, obstacles=obstacles)
105 |
106 | cost_time = 0.0
107 | start_time = time.time()
108 | while step * DT < total_time:
109 | print(step, '')
110 |
111 | actions = {}
112 | which_agents_done = env.step(actions)
113 |
114 | # Is arrived.
115 | if which_agents_done:
116 | print("All agents finished!", step)
117 | end_time = time.time()
118 | cost_time = end_time - start_time
119 | print('cost time', cost_time)
120 | break
121 | step += 1
122 |
123 | log_save_dir = os.path.dirname(os.path.realpath(__file__)) + '/../visualization/rvo3d_dubins/log/'
124 | os.makedirs(log_save_dir, exist_ok=True)
125 | # trajectory
126 | writer = pd.ExcelWriter(log_save_dir + '/trajs.xlsx')
127 | for a in agents:
128 | a.history_info.to_excel(writer, sheet_name='agent' + str(a.id))
129 | writer.save()
130 |
131 | # scenario information
132 | info_dict_to_visualize = {
133 | 'all_agent_info': [],
134 | 'all_obstacle': [],
135 | 'all_compute_time': 0.0,
136 | 'all_straight_distance': 0.0,
137 | 'all_distance': 0.0,
138 | 'successful_num': 0,
139 | 'all_desire_step_num': 0,
140 | 'all_step_num': 0,
141 | 'SuccessRate': 0.0,
142 | 'ExtraTime': 0.0,
143 | 'ExtraDistance': 0.0,
144 | 'AverageSpeed': 0.0,
145 | 'AverageCost': 0.0
146 | }
147 | all_straight_dist = 0.0
148 | all_agent_total_time = 0.0
149 | all_agent_total_dist = 0.0
150 | num_of_success = 0
151 | all_desire_step_num = 0
152 | all_step_num = 0
153 |
154 | SuccessRate = 0.0
155 | ExtraTime = 0.0
156 | ExtraDistance = 0.0
157 | AverageSpeed = 0.0
158 | AverageCost = 0.0
159 |
160 | for agent in agents:
161 | agent_info_dict = {'id': agent.id, 'gp': agent.group, 'radius': agent.radius,
162 | 'goal_pos': agent.goal_global_frame.tolist()}
163 | info_dict_to_visualize['all_agent_info'].append(agent_info_dict)
164 | if not agent.is_collision and not agent.is_out_of_max_time:
165 | num_of_success += 1
166 | all_agent_total_time += agent.total_time
167 | all_straight_dist += agent.straight_path_length
168 | all_agent_total_dist += agent.total_dist
169 | all_desire_step_num += agent.desire_steps
170 | all_step_num += agent.step_num
171 |
172 | info_dict_to_visualize['all_compute_time'] = all_agent_total_time
173 | info_dict_to_visualize['all_straight_distance'] = all_straight_dist
174 | info_dict_to_visualize['all_distance'] = all_agent_total_dist
175 | info_dict_to_visualize['successful_num'] = num_of_success
176 | info_dict_to_visualize['all_desire_step_num'] = all_desire_step_num
177 | info_dict_to_visualize['all_step_num'] = all_step_num
178 |
179 | info_dict_to_visualize['SuccessRate'] = num_of_success / agents_num
180 | info_dict_to_visualize['ExtraTime'] = ((all_step_num - all_desire_step_num) * DT) / num_of_success
181 | info_dict_to_visualize['ExtraDistance'] = (all_agent_total_dist - all_straight_dist) / num_of_success
182 | info_dict_to_visualize['AverageSpeed'] = all_agent_total_dist/all_step_num/DT
183 | info_dict_to_visualize['AverageCost'] = 1000*all_agent_total_time / all_step_num
184 |
185 | for obstacle in obstacles:
186 | obstacle_info_dict = {'position': obstacle.pos, 'shape': obstacle.shape, 'feature': obstacle.feature}
187 | info_dict_to_visualize['all_obstacle'].append(obstacle_info_dict)
188 |
189 | info_str = json.dumps(info_dict_to_visualize, indent=4)
190 | with open(log_save_dir + '/env_cfg.json', 'w') as json_file:
191 | json_file.write(info_str)
192 | json_file.close()
193 | step = 0
194 |
--------------------------------------------------------------------------------
/run_example/run_sca.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import json
4 | import math
5 | import random
6 | import pandas as pd
7 | import numpy as np
8 | from mamp.agents.agent import Agent
9 | from mamp.agents.obstacle import Obstacle
10 | from mamp.policies.sca.scaPolicy import SCAPolicy
11 | from mamp.configs.config import DT
12 | from mamp.envs.mampenv import MACAEnv
13 | from mamp.util import mod2pi
14 | from mamp.read_map import read_obstacle
15 |
16 |
17 | def set_circle_pos(center, rad, agent_num):
18 | k = 0
19 | agent_pos = []
20 | agent_goal = []
21 | for j in range(agent_num):
22 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
23 | center[1] + round(rad * np.sin(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
24 | 10.0,
25 | round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0, 0])
26 |
27 | for j in range(agent_num):
28 | agent_goal.append(agent_pos[(j + int(agent_num / 2)) % agent_num])
29 |
30 | return agent_pos, agent_goal
31 |
32 |
33 | def set_random_pos(agent_num):
34 | agent_pos = []
35 | agent_goal = []
36 | z_value = 30.0
37 | r = 25
38 | for n in range(agent_num):
39 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r),
40 | random.uniform(0.0, 2 * np.pi), 0.0, 0.0])
41 | agent_pos.append(pos)
42 | for n in range(agent_num):
43 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r),
44 | random.uniform(0.0, 2 * np.pi), 0.0, 0.0])
45 | agent_goal.append(pos)
46 |
47 | for i in range(len(agent_pos)):
48 | agent_pos[i][2] = agent_pos[i][2] + z_value
49 | agent_goal[i][2] = agent_goal[i][2] + z_value
50 | return agent_pos, agent_goal
51 |
52 |
53 | def set_takeoff_landing_pos(agent_num):
54 | rad = 4.0
55 | center = (0.0, 0.0)
56 | landing_num = agent_num - int(agent_num / 2)
57 | takeoff_num = int(agent_num / 2)
58 | z_plane_landing, z_plane_takeoff = 10.0, 0.0
59 | agent_pos, agent_goal = [], []
60 |
61 | # takeoff
62 | for j in range(landing_num):
63 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / landing_num), 2),
64 | center[1] + round(rad * np.sin(2 * j * np.pi / landing_num), 2),
65 | z_plane_landing,
66 | round(np.pi / 2, 5), 0, 0])
67 | for j in range(landing_num, agent_num):
68 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / takeoff_num), 2),
69 | center[1] + round(rad * np.sin(2 * j * np.pi / takeoff_num), 2),
70 | z_plane_takeoff,
71 | round(-np.pi / 2, 5), 0, 0])
72 |
73 | # # landing
74 | for j in range(landing_num):
75 | index = j + landing_num
76 | agent_goal.append(agent_pos[index])
77 | for j in range(landing_num, agent_num):
78 | index = j - takeoff_num
79 | agent_goal.append(agent_pos[index])
80 |
81 | return agent_pos, agent_goal
82 |
83 |
84 | def spawn_n_drones(center, rad, drone_num, environment):
85 | pos = []
86 | goal = []
87 | if environment == 'exp1':
88 | height = 10
89 | elif environment == 'exp3':
90 | height = 2
91 | else:
92 | height = 10
93 | for i in range(drone_num):
94 | pos.append([center[0] + rad * math.cos(2 * i * np.pi / drone_num),
95 | center[1] + rad * math.sin(2 * i * np.pi / drone_num),
96 | height,
97 | np.deg2rad(-90 - i * 360 / drone_num), 0, 0])
98 | for i in range(drone_num):
99 | goal.append([center[0] - rad * math.cos(2 * i * np.pi / drone_num),
100 | center[1] - rad * math.sin(2 * i * np.pi / drone_num),
101 | height,
102 | np.deg2rad(90 - i * 360 / drone_num), 0, 0])
103 | return pos, goal
104 |
105 |
106 | def build_agents():
107 | """
108 | exp1: use set_circle_pos(drone_num); circle benchmark with return to the start; drone_num is 14.
109 | exp2: use set_takeoff_landing_pos(drone_num); landing and take-off; drone_num is 16.
110 | exp3: use spawn_n_drones(); low-altitude search; drone_num is 16.
111 | """
112 | init_vel = [0.0, 0.0, 0.0]
113 | radius = 0.5
114 | pref_speed = 1.0
115 | drone_num = 16
116 | pos, goal = set_circle_pos(center=(0, 0), rad=10.0, agent_num=drone_num)
117 | # pos, goal = set_takeoff_landing_pos(drone_num)
118 | # pos, goal = spawn_n_drones(center=(35, 30), rad=10.0, drone_num=drone_num, environment="exp3")
119 | # pos, goal = set_random_pos(drone_num)
120 | agents = []
121 | for i in range(len(pos)):
122 | agents.append(Agent(start_pos=pos[i], goal_pos=goal[i],
123 | vel=init_vel, radius=radius,
124 | pref_speed=pref_speed, policy=SCAPolicy,
125 | id=i, dt=DT))
126 | return agents
127 |
128 |
129 | def build_obstacles():
130 | """
131 | experiment2: take-off and landing
132 | 1) leave obstacles empty to test inter-agent avoidance only.
133 | 2) fill obstacles from ob_list to add static obstacles.
134 |
135 | experiment3: low-altitude search
136 | all obstacles are read from the map.binvox file by the read_obstacle function
137 | """
138 | # ----------------------exp2------------------------
139 | rad = 4.0
140 | center = (0.0, 0.0)
141 | obs_num = 8
142 | z_plane = 5.0
143 | ob_list = []
144 | for j in range(obs_num):
145 | ob_list.append([center[0] + round(rad * np.cos(2 * j * np.pi / obs_num), 2),
146 | center[1] + round(rad * np.sin(2 * j * np.pi / obs_num), 2),
147 | z_plane])
148 | objs = []
149 | for i in range(len(ob_list)):
150 | objs.append(Obstacle(pos=ob_list[i], shape_dict={'shape': "sphere", 'feature': 1.0}, id=i))
151 |
152 | # ----------------------exp3------------------------
153 | # map_path = '../visualization/map/map.binvox'
154 | # objs = read_obstacle(center=(35, 30), environ="exp3", obs_path=map_path)
155 | return objs
156 |
157 |
158 | if __name__ == "__main__":
159 |
160 | total_time = 10000
161 | step = 0
162 |
163 | # Build agents and obstacles.
164 | agents = build_agents()
165 | agents_num = len(agents)
166 |
167 | obstacles = build_obstacles()
168 |
169 | env = MACAEnv()
170 | env.set_agents(agents, obstacles=obstacles)
171 |
172 | cost_time = 0.0
173 | start_time = time.time()
174 | while step * DT < total_time:
175 | print(step, '')
176 |
177 | actions = {}
178 | which_agents_done = env.step(actions)
179 |
180 | # Is arrived.
181 | if which_agents_done:
182 | print("All agents finished!", step)
183 | end_time = time.time()
184 | cost_time = end_time - start_time
185 | print('cost time', cost_time)
186 | break
187 | step += 1
188 |
189 | log_save_dir = os.path.dirname(os.path.realpath(__file__)) + '/../visualization/sca/log/'
190 | os.makedirs(log_save_dir, exist_ok=True)
191 |
192 | # trajectory
193 | writer = pd.ExcelWriter(log_save_dir + '/trajs.xlsx')
194 | for a in agents:
195 | a.history_info.to_excel(writer, sheet_name='agent' + str(a.id))
196 | writer.save()
197 |
198 | # scenario information
199 | info_dict_to_visualize = {
200 | 'all_agent_info': [],
201 | 'all_obstacle': [],
202 | 'all_compute_time': 0.0,
203 | 'all_straight_distance': 0.0,
204 | 'all_distance': 0.0,
205 | 'successful_num': 0,
206 | 'all_desire_step_num': 0,
207 | 'all_step_num': 0,
208 | 'SuccessRate': 0.0,
209 | 'ExtraTime': 0.0,
210 | 'ExtraDistance': 0.0,
211 | 'AverageSpeed': 0.0,
212 | 'AverageCost': 0.0
213 | }
214 | all_straight_dist = 0.0
215 | all_agent_total_time = 0.0
216 | all_agent_total_dist = 0.0
217 | num_of_success = 0
218 | all_desire_step_num = 0
219 | all_step_num = 0
220 |
221 | SuccessRate = 0.0
222 | ExtraTime = 0.0
223 | ExtraDistance = 0.0
224 | AverageSpeed = 0.0
225 | AverageCost = 0.0
226 |
227 | for agent in agents:
228 | agent_info_dict = {'id': agent.id, 'gp': agent.group, 'radius': agent.radius,
229 | 'goal_pos': agent.goal_global_frame.tolist()}
230 | info_dict_to_visualize['all_agent_info'].append(agent_info_dict)
231 | if not agent.is_collision and not agent.is_out_of_max_time:
232 | num_of_success += 1
233 | all_agent_total_time += agent.total_time
234 | all_straight_dist += agent.straight_path_length
235 | all_agent_total_dist += agent.total_dist
236 | all_desire_step_num += agent.desire_steps
237 | all_step_num += agent.step_num
238 |
239 | info_dict_to_visualize['all_compute_time'] = all_agent_total_time
240 | info_dict_to_visualize['all_straight_distance'] = all_straight_dist
241 | info_dict_to_visualize['all_distance'] = all_agent_total_dist
242 | info_dict_to_visualize['successful_num'] = num_of_success
243 | info_dict_to_visualize['all_desire_step_num'] = all_desire_step_num
244 | info_dict_to_visualize['all_step_num'] = all_step_num
245 |
246 | info_dict_to_visualize['SuccessRate'] = num_of_success / agents_num
247 | info_dict_to_visualize['ExtraTime'] = ((all_step_num - all_desire_step_num) * DT) / num_of_success
248 | info_dict_to_visualize['ExtraDistance'] = (all_agent_total_dist - all_straight_dist) / num_of_success
249 | info_dict_to_visualize['AverageSpeed'] = all_agent_total_dist / all_step_num / DT
250 | info_dict_to_visualize['AverageCost'] = 1000 * all_agent_total_time / all_step_num
251 |
252 | for obstacle in obstacles:
253 | obstacle_info_dict = {'position': obstacle.pos, 'shape': obstacle.shape, 'feature': obstacle.feature}
254 | info_dict_to_visualize['all_obstacle'].append(obstacle_info_dict)
255 |
256 | info_str = json.dumps(info_dict_to_visualize, indent=4)
257 | with open(log_save_dir + '/env_cfg.json', 'w') as json_file:
258 | json_file.write(info_str)
259 | json_file.close()
260 | step = 0
261 |
--------------------------------------------------------------------------------
/run_example/run_srvo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import json
4 | import math
5 | import random
6 | import pandas as pd
7 | import numpy as np
8 | from mamp.agents.agent import Agent
9 | from mamp.agents.obstacle import Obstacle
10 | from mamp.policies.srvo3dPolicy import SRVO3DPolicy
11 | from mamp.configs.config import DT
12 | from mamp.envs.mampenv import MACAEnv
13 | from mamp.util import mod2pi, l3norm
14 |
15 |
16 | def set_circle_pos(agent_num):
17 | center = (0.0, 0.0)
18 | rad = 18.0
19 | k = 0
20 | agent_origin_pos = []
21 | agent_pos = []
22 | agent_goal = []
23 | for j in range(agent_num):
24 | agent_pos.append([center[0] + round(rad * np.cos(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
25 | center[1] + round(rad * np.sin(2 * j * np.pi / agent_num + k * np.pi / 4), 2),
26 | 10.0,
27 | round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0, 0])
28 | agent_origin_pos.append((agent_pos[j][0], agent_pos[j][1]))
29 |
30 | for j in range(agent_num):
31 | agent_goal.append(agent_pos[(j + int(agent_num / 2)) % agent_num][:3]
32 | + [round(mod2pi(2 * j * np.pi / agent_num + k * np.pi / 4 + np.pi), 5), 0.0, 0.0])
33 | return agent_pos, agent_goal, agent_origin_pos
34 |
35 |
36 | def set_sphere(num_N):
37 | agent_origin_pos = []
38 | agent_pos = []
39 | agent_goal = []
40 | rad = 25.0
41 | z_value = 30.0
42 | param_phi = (math.sqrt(5.0) - 1.0) / 2.0
43 | for n in range(1, num_N + 1):
44 | z_n = (2 * n - 1) / num_N - 1
45 | x_n = math.sqrt(1 - z_n ** 2) * math.cos(2 * math.pi * n * param_phi)
46 | y_n = math.sqrt(1 - z_n ** 2) * math.sin(2 * math.pi * n * param_phi)
47 | pos = np.array([rad * x_n, rad * y_n, rad * z_n, 0.0, 0.0, 0.0])
48 | agent_pos.append(pos)
49 | agent_goal.append(-pos)
50 | for i in range(len(agent_pos)):
51 | agent_pos[i][2] = agent_pos[i][2] + z_value
52 | agent_goal[i][2] = agent_goal[i][2] + z_value
53 |
54 | return agent_pos, agent_goal, agent_origin_pos
55 |
56 |
57 | def set_random_pos(agent_num):
58 | agent_origin_pos = []
59 | agent_pos = []
60 | agent_goal = []
61 | z_value = 30.0
62 | r = 15.0
63 | for n in range(agent_num):
64 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
65 | agent_pos.append(pos)
66 | for n in range(agent_num):
67 | pos = np.array([random.uniform(-r, r), random.uniform(-r, r), random.uniform(-r, r), 0.0, 0.0, 0.0])
68 | agent_goal.append(pos)
69 |
70 | for i in range(len(agent_pos)):
71 | agent_pos[i][2] = agent_pos[i][2] + z_value
72 | agent_goal[i][2] = agent_goal[i][2] + z_value
73 | return agent_pos, agent_goal, agent_origin_pos
74 |
75 |
76 | def build_agents():
77 | # build agents
78 | drone_num = 100
79 | init_vel = [0.0, 0.0, 0.0]
80 | radius = 0.5
81 | pref_speed = 1.0
82 | pos, goal, DroneOriginPos = set_circle_pos(drone_num)
83 | # pos, goal, DroneOriginPos = set_sphere(drone_num)
84 | # pos, goal, DroneOriginPos = set_random_pos(drone_num)
85 | agents = []
86 | for i in range(drone_num):
87 | agents.append(Agent(start_pos=pos[i], goal_pos=goal[i],
88 | vel=init_vel, radius=radius,
89 | pref_speed=pref_speed, policy=SRVO3DPolicy,
90 | id=i, dt=DT))
91 | return agents
92 |
93 |
94 | def build_obstacles():
95 | ob_list = [[-7.0, -10, 10.0]]
96 | obstacles = []
97 | # obstacles.append(Obstacle(pos=ob_list[0], shape_dict={'shape': "cube", 'feature': (2.0, 2.0, 2.0)}, id=0))
98 | return obstacles
99 |
100 |
101 | if __name__ == "__main__":
102 | total_time = 10000
103 | step = 0
104 |
105 | # Build agents and obstacles.
106 | agents = build_agents()
107 | obstacles = build_obstacles()
108 |
109 | env = MACAEnv()
110 | env.set_agents(agents, obstacles=obstacles)
111 |
112 | # Test for SRVO3D
113 | agents_num = len(agents)
114 | cost_time = 0.0
115 | start_time = time.time()
116 | while step * DT < total_time:
117 | print(step, '')
118 |
119 | actions = {}
120 | which_agents_done = env.step(actions)
121 |
122 | # Is arrived.
123 | if which_agents_done:
124 | print("All agents finished!", step)
125 | end_time = time.time()
126 | cost_time = end_time - start_time
127 | print('cost time', cost_time)
128 | break
129 | step += 1
130 |
131 | log_save_dir = os.path.dirname(os.path.realpath(__file__)) + '/../visualization/srvo3d/log/'
132 | os.makedirs(log_save_dir, exist_ok=True)
133 | # trajectory
134 | writer = pd.ExcelWriter(log_save_dir + '/trajs.xlsx')
135 | for a in agents:
136 | a.history_info.to_excel(writer, sheet_name='agent' + str(a.id))
137 | writer.save()
138 |
139 | # scenario information
140 | info_dict_to_visualize = {
141 | 'all_agent_info': [],
142 | 'all_obstacle': [],
143 | 'all_compute_time': 0.0,
144 | 'all_straight_distance': 0.0,
145 | 'all_distance': 0.0,
146 | 'successful_num': 0,
147 | 'all_desire_step_num': 0,
148 | 'all_step_num': 0,
149 | 'SuccessRate': 0.0,
150 | 'ExtraTime': 0.0,
151 | 'ExtraDistance': 0.0,
152 | 'AverageSpeed': 0.0,
153 | 'AverageCost': 0.0
154 | }
155 | all_straight_dist = 0.0
156 | all_agent_total_time = 0.0
157 | all_agent_total_dist = 0.0
158 | num_of_success = 0
159 | all_desire_step_num = 0
160 | all_step_num = 0
161 |
162 | SuccessRate = 0.0
163 | ExtraTime = 0.0
164 | ExtraDistance = 0.0
165 | AverageSpeed = 0.0
166 | AverageCost = 0.0
167 |
168 | for agent in agents:
169 | agent_info_dict = {'id': agent.id, 'gp': agent.group, 'radius': agent.radius,
170 | 'goal_pos': agent.goal_global_frame.tolist()}
171 | info_dict_to_visualize['all_agent_info'].append(agent_info_dict)
172 | if not agent.is_collision and not agent.is_out_of_max_time:
173 | num_of_success += 1
174 | all_agent_total_time += agent.total_time
175 | all_straight_dist += agent.straight_path_length
176 | all_agent_total_dist += agent.total_dist
177 | all_desire_step_num += agent.desire_steps
178 | all_step_num += agent.step_num
179 |
180 | info_dict_to_visualize['all_compute_time'] = all_agent_total_time
181 | info_dict_to_visualize['all_straight_distance'] = all_straight_dist
182 | info_dict_to_visualize['all_distance'] = all_agent_total_dist
183 | info_dict_to_visualize['successful_num'] = num_of_success
184 | info_dict_to_visualize['all_desire_step_num'] = all_desire_step_num
185 | info_dict_to_visualize['all_step_num'] = all_step_num
186 |
187 | info_dict_to_visualize['SuccessRate'] = num_of_success/agents_num
188 | info_dict_to_visualize['ExtraTime'] = ((all_step_num-all_desire_step_num)*DT)/num_of_success
189 | info_dict_to_visualize['ExtraDistance'] = (all_agent_total_dist-all_straight_dist)/num_of_success
190 | info_dict_to_visualize['AverageSpeed'] = all_agent_total_dist/all_step_num/DT
191 | info_dict_to_visualize['AverageCost'] = 1000*all_agent_total_time / all_step_num
192 |
193 | for obstacle in obstacles:
194 | obstacle_info_dict = {'position': obstacle.pos, 'shape': obstacle.shape, 'feature': obstacle.feature}
195 | info_dict_to_visualize['all_obstacle'].append(obstacle_info_dict)
196 |
197 | info_str = json.dumps(info_dict_to_visualize, indent=4)
198 | with open(log_save_dir + '/env_cfg.json', 'w') as json_file:
199 | json_file.write(info_str)
200 | json_file.close()
201 | step = 0
202 |
--------------------------------------------------------------------------------
/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/__init__.py
--------------------------------------------------------------------------------
/visualization/draw_episode.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import glob
4 | import imageio
5 | import numpy as np
6 | import pandas as pd
7 | from plt3d import plot_episode
8 |
9 |
10 | case_name = 'sca'  # Change this folder name to visualize trajectories produced by a different policy.
11 |
12 | abs_path = os.path.abspath('.')
13 | plot_save_dir = abs_path + '/' + case_name + '/'
14 | log_save_dir = abs_path + '/' + case_name + '/'
15 |
16 |
17 | def get_agent_traj(info_file, traj_file):
18 | with open(info_file, "r") as f:
19 | json_info = json.load(f)
20 |
21 | traj_list = []
22 | step_num_list = []
23 | agents_info = json_info['all_agent_info']
24 | obstacles_info = json_info['all_obstacle']
25 | for agent_info in agents_info:
26 | agent_id = agent_info['id']
27 |
28 | df = pd.read_excel(traj_file, index_col=0, sheet_name='agent' + str(agent_id))
29 | step_num_list.append(df.shape[0])
30 | traj_list.append(df.to_dict('list'))
31 |
32 | return agents_info, traj_list, step_num_list, obstacles_info
33 |
34 |
35 | def png_to_gif(general_fig_name, last_fig_name, animation_filename, video_filename):
36 | all_filenames = plot_save_dir + general_fig_name
37 | last_filename = plot_save_dir + last_fig_name
38 | print(all_filenames)
39 |
40 | # Dump all those images into a gif (sorted by timestep).
41 | filenames = glob.glob(all_filenames)
42 | filenames.sort()
43 | images = []
44 | for filename in filenames:
45 | images.append(imageio.imread(filename))
46 | os.remove(filename)
47 | for i in range(10):
48 | images.append(imageio.imread(last_filename))
49 |
50 | # Save the gif in a new animations sub-folder.
51 | animation_save_dir = plot_save_dir + "animations/"
52 | os.makedirs(animation_save_dir, exist_ok=True)
53 | video_filename = animation_save_dir + video_filename
54 | animation_filename = animation_save_dir + animation_filename
55 | imageio.mimsave(animation_filename, images, )
56 |
57 | # Convert .gif to .mp4.
58 | try:
59 | import moviepy.editor as mp
60 | except imageio.core.fetching.NeedDownloadError:
61 | imageio.plugins.ffmpeg.download()
62 | import moviepy.editor as mp
63 | clip = mp.VideoFileClip(animation_filename)
64 | clip.write_videofile(video_filename)
65 |
66 |
67 | info_file = log_save_dir + 'log/env_cfg.json'
68 | traj_file = log_save_dir + 'log/trajs.xlsx'
69 |
70 | agents_info, traj_list, step_num_list, obstacles_info = get_agent_traj(info_file, traj_file)
71 |
72 | agent_num = len(step_num_list)
73 | base_fig_name_style = "{test_case}_{policy}_{num_agents}agents"
74 | base_fig_name = base_fig_name_style.format(policy=case_name, num_agents=agent_num, test_case=str(0).zfill(3))
75 | general_fig_name = base_fig_name + '_*.png'
76 | last_fig_name = base_fig_name + '.png'
77 | animation_filename = base_fig_name + '.gif'
78 | video_filename = base_fig_name + '.mp4'
79 |
80 | plot_episode(obstacles_info, agents_info, traj_list, step_num_list, plot_save_dir, base_fig_name, last_fig_name)
81 |
82 | png_to_gif(general_fig_name, last_fig_name, animation_filename, video_filename)
83 |
84 |
85 |
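Note: png_to_gif relies on the zero-padded step suffix added in plt3d.plot_episode ('_{:05}'.format(current_step)), so sorting the glob results lexicographically also orders them chronologically. A small illustration (the agent count in the file names is illustrative):

    import glob

    # e.g. ['000_sca_20agents_00000.png', '000_sca_20agents_00003.png', ...]
    filenames = sorted(glob.glob('sca/000_sca_20agents_*.png'))
    # Zero padding guarantees '00009' < '00012' lexicographically,
    # which would not hold for unpadded step numbers ('9' > '12').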
--------------------------------------------------------------------------------
/visualization/draw_path.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import numpy as np
4 | import open3d as o3d
5 | from math import sqrt, pi, cos, sin
6 | from openpyxl import load_workbook
7 | from typing import List
8 |
9 |
10 | case_name = 'sca'
11 |
12 | abs_path = os.path.abspath('.')
13 | log_save_dir = abs_path + '/' + case_name + '/'
14 |
15 |
16 | def load_path_data(filename, agents_num) -> dict:
17 | agent_trajs = dict()
18 | wb = load_workbook(filename)
19 | for index in range(agents_num):
20 |         data = read_sheet(wb, 'agent' + str(index))
21 |         agent_trajs['agent' + str(index)] = dict2array_pos(data)
22 | return agent_trajs
23 |
24 |
25 | def read_sheet(wb, sheet_name) -> List[dict]:
26 | sheet = wb[sheet_name]
27 | row_len = sheet.max_row
28 | column_len = sheet.max_column
29 | data = []
30 | for row in range(2, row_len + 1):
31 | sub_data = dict()
32 | sub_data['index'] = int(row) - 2
33 | for column in range(2, column_len + 1):
34 | sub_data[sheet.cell(1, column).value] = sheet.cell(row, column).value
35 | data.append(sub_data)
36 | return data
37 |
38 |
39 | def dict2array_pos(data: list) -> np.ndarray:
40 | data_list = []
41 | for i in range(len(data)):
42 | data_list.append([data[i]['pos_x'], data[i]['pos_y'], data[i]['pos_z']])
43 | return np.array(data_list)
44 |
45 |
46 | def points_obs(obstacles: list) -> np.ndarray:
47 | obs_list = list()
48 | for obs in obstacles:
49 | obs_list.append(obs.pos_global_frame)
50 | return np.array(obs_list)
51 |
52 |
53 | def draw_sphere(center, rad=1.0):
54 | num_N = int(rad*512)
55 | obj_pos = []
56 | param_phi = (sqrt(5.0) - 1.0) / 2.0
57 | for i in range(1, num_N + 1):
58 | z_n = (2 * i - 1) / num_N - 1
59 | x_n = sqrt(1 - z_n ** 2) * cos(2 * pi * i * param_phi)
60 | y_n = sqrt(1 - z_n ** 2) * sin(2 * pi * i * param_phi)
61 | pos = [rad * x_n + center[0], rad * y_n + center[1], rad * z_n + center[2]]
62 | obj_pos.append(pos)
63 |
64 | return obj_pos
65 |
66 |
67 | def read_obstacles_pos(obstacles_information):
68 | obs_list = list()
69 | for i in range(len(obstacles_information)):
70 | feature = obstacles_information[i]['feature']
71 | position = obstacles_information[i]['position']
72 | if feature <= 0.2:
73 | obs_list.append(position)
74 | else:
75 | obs_list += draw_sphere(position, rad=feature)
76 | return np.array(obs_list)
77 |
78 |
79 | def get_agent_info(filename):
80 | with open(filename, "r") as f:
81 | json_info = json.load(f)
82 |
83 | agents_information = json_info['all_agent_info']
84 | obstacles_information = json_info['all_obstacle']
85 |
86 | return agents_information, obstacles_information
87 |
88 |
89 | if __name__ == '__main__':
90 | info_file = log_save_dir + 'log/env_cfg.json'
91 | traj_file = log_save_dir + 'log/trajs.xlsx'
92 |
93 | agents_info, obstacles_info = get_agent_info(info_file)
94 | agent_num = len(agents_info)
95 | uav_trajs = load_path_data(traj_file, agent_num)
96 |
97 | pcd_list = list()
98 | for n in range(len(uav_trajs)):
99 | pcd = o3d.geometry.PointCloud()
100 | pcd.points = o3d.utility.Vector3dVector(uav_trajs['agent' + str(n)])
101 | pcd_list.append(pcd)
102 |
103 | obstacles_pos = read_obstacles_pos(obstacles_info)
104 | pcd = o3d.geometry.PointCloud()
105 | pcd.points = o3d.utility.Vector3dVector(obstacles_pos)
106 | pcd_list.append(pcd)
107 |
108 |     o3d.visualization.draw_geometries(pcd_list)
109 |
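Note: draw_sphere samples an obstacle's surface with a golden-ratio (Fibonacci) spiral, spreading int(rad*512) points nearly uniformly over the sphere. A small self-check, assuming it is run alongside draw_path.py (the helper name check_on_sphere is illustrative):

    import numpy as np

    def check_on_sphere(center, rad=2.0, tol=1e-9):
        pts = np.array(draw_sphere(center, rad))               # (N, 3) surface samples
        dists = np.linalg.norm(pts - np.array(center), axis=1)
        return np.allclose(dists, rad, atol=tol)               # every sample lies on the radius

    # check_on_sphere([0.0, 0.0, 5.0], rad=2.0)  ->  True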
--------------------------------------------------------------------------------
/visualization/figs/c1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/c1.png
--------------------------------------------------------------------------------
/visualization/figs/c2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/c2.png
--------------------------------------------------------------------------------
/visualization/figs/example1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/example1.gif
--------------------------------------------------------------------------------
/visualization/figs/example21.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/example21.gif
--------------------------------------------------------------------------------
/visualization/figs/example22.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/example22.gif
--------------------------------------------------------------------------------
/visualization/figs/example3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/example3.png
--------------------------------------------------------------------------------
/visualization/figs/figexp11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp11.png
--------------------------------------------------------------------------------
/visualization/figs/figexp12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp12.png
--------------------------------------------------------------------------------
/visualization/figs/figexp21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp21.png
--------------------------------------------------------------------------------
/visualization/figs/figexp22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp22.png
--------------------------------------------------------------------------------
/visualization/figs/figexp31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp31.png
--------------------------------------------------------------------------------
/visualization/figs/figexp32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/figs/figexp32.png
--------------------------------------------------------------------------------
/visualization/map/map.binvox:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuuya1/SCA/3021d057d5759b577539a1d284281514034f92e3/visualization/map/map.binvox
--------------------------------------------------------------------------------
/visualization/plt3d.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import glob
4 | import imageio
5 | import numpy as np
6 | import matplotlib.cm as cmx
7 | import matplotlib.colors as colors
8 | import pandas as pd
9 | import matplotlib.pyplot as plt
10 | from mpl_toolkits.mplot3d import Axes3D
11 | from math import pi
12 |
13 | from vis_util import get_uav_model, draw_agent_3d, draw_sphere
14 |
15 | plt_colors = [[0.8500, 0.3250, 0.0980], [0.0, 0.4470, 0.7410], [0.4660, 0.6740, 0.1880],
16 | [0.4940, 0.1840, 0.5560],
17 | [0.9290, 0.6940, 0.1250], [0.3010, 0.7450, 0.9330], [0.6350, 0.0780, 0.1840]]
18 |
19 |
20 | def draw_traj_3d(ax, agents_info, obstacles_info, agents_traj_list, step_num_list, current_step):
21 | for idx, agent_traj in enumerate(agents_traj_list):
22 | current_step_temp = current_step
23 | color_ind = idx % len(plt_colors)
24 | plt_color = plt_colors[color_ind]
25 |
26 | ag_step_num = step_num_list[idx]
27 | if current_step >= ag_step_num:
28 | current_step = ag_step_num - 1
29 | pos_x = agent_traj['pos_x']
30 | pos_y = agent_traj['pos_y']
31 | pos_z = agent_traj['pos_z']
32 | alpha = agent_traj['alpha']
33 | beta = agent_traj['beta']
34 | gamma = agent_traj['gamma']
35 | vel_x = agent_traj['vel_x']
36 | vel_y = agent_traj['vel_y']
37 | vel_z = agent_traj['vel_z']
38 | goal = agents_info[idx]['goal_pos']
39 | agent_radius = agents_info[idx]['radius']
40 |
41 |         ax.plot(pos_x[:current_step], pos_y[:current_step], pos_z[:current_step], color=plt_color, ls='-', linewidth=2)
42 | pos_global_frame = [pos_x[current_step], pos_y[current_step], pos_z[current_step]]
43 | vel = [vel_x[current_step], vel_y[current_step], vel_z[current_step]]
44 | heading_global_frame = [alpha[current_step], beta[current_step], gamma[current_step]]
45 | current_step = current_step_temp
46 |
47 | # -------Use sphere model-------
48 | c_map = get_cmap(len(agents_traj_list))
49 | # draw_agent(ax, pos_global_frame, goal, vel, radius, c_map(idx), idx)
50 |
51 | # -------Use uav_model-------
52 | my_agent_model = get_uav_model(agent_radius, tall=0.1)
53 |
54 | # -------Use car_model-------
55 | # my_agent_model = get_car_model(agent_radius, height=0.3)
56 |
57 | draw_agent_3d(ax=ax,
58 | pos_global_frame=pos_global_frame,
59 | goal=goal,
60 | c_map=c_map(idx),
61 | heading_global_frame=heading_global_frame,
62 | my_agent_model=my_agent_model, a_id=idx)
63 |
64 | for obstacle_dict in obstacles_info:
65 | if obstacle_dict['shape'] == 'sphere':
66 | draw_sphere(ax, obstacle_dict)
67 |
68 |
69 | def set_ax_parameter(ax):
70 | ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([1, 1, 0.6, 1]))
71 | ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
72 | ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
73 | ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
74 | ax.view_init(32, 45)
75 |
76 | ax.set_xlim3d(-15, 15)
77 | ax.set_ylim3d(-15, 15)
78 | ax.set_zlim3d(-5, 20)
79 | ax.set_xlabel('X(m)')
80 | ax.set_ylabel('Y(m)')
81 | ax.set_zlabel('Z(m)')
82 |
83 |
84 | def plot_save_one_pic(obstacles_info, agents_info, agents_traj_list, step_num_list, filename, current_step):
85 | fig_size = (10, 8)
86 | fig = plt.figure(0)
87 | fig.set_size_inches(fig_size[0], fig_size[1])
88 |
89 | ax = Axes3D(fig)
90 |
91 | set_ax_parameter(ax)
92 |
93 | plt.grid(alpha=0.2)
94 |
95 | draw_traj_3d(ax, agents_info, obstacles_info, agents_traj_list, step_num_list, current_step)
96 | plt.savefig(filename, bbox_inches="tight")
97 |
98 | if current_step == 0: plt.show()
99 | if current_step == max(step_num_list): plt.show()
100 | plt.close()
101 |
102 |
103 | def plot_episode(obstacles_info, agents_info, traj_list, step_num_list, plot_save_dir, base_fig_name, last_fig_name):
104 | current_step = 0
105 | num_agents = len(step_num_list)
106 | total_step = max(step_num_list)
107 | print('num_agents:', num_agents, 'total_step:', total_step)
108 | while current_step < total_step:
109 | fig_name = base_fig_name + "_{:05}".format(current_step) + '.png'
110 | filename = plot_save_dir + fig_name
111 | plot_save_one_pic(obstacles_info, agents_info, traj_list, step_num_list, filename, current_step)
112 | print(filename)
113 | current_step += 3
114 | filename = plot_save_dir + last_fig_name
115 | plot_save_one_pic(obstacles_info, agents_info, traj_list, step_num_list, filename, total_step)
116 |
117 |
118 | def get_cmap(N):
119 | """Returns a function that maps each index in 0, 1, ... N-1 to a distinct RGB color."""
120 | color_norm = colors.Normalize(vmin=0, vmax=N - 1)
121 | scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
122 |
123 | def map_index_to_rgb_color(index):
124 | return scalar_map.to_rgba(index)
125 |
126 | return map_index_to_rgb_color
127 |
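Note: get_cmap returns a callable so that each agent index maps to a stable, distinct colour across all frames of an episode. A minimal usage sketch, assuming the functions in this file are importable:

    c_map = get_cmap(8)      # colour function for 8 agents
    rgba_agent_0 = c_map(0)  # RGBA tuple for agent 0
    rgba_agent_7 = c_map(7)  # RGBA tuple for agent 7, far from agent 0 on the 'hsv' wheel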
--------------------------------------------------------------------------------
/visualization/vis_util.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | from mpl_toolkits.mplot3d import Axes3D
5 | from mpl_toolkits.mplot3d.art3d import Poly3DCollection
6 | from math import sin, cos, atan2, sqrt, pow
7 |
8 |
9 | # models
10 | def get_car_model(length=2.0, width=1.5, height=1.0):
11 | car_model = [[-width / 2., length / 2, height / 2], [-width / 2, -length / 2, height / 2],
12 | [width / 2., -length / 2, height / 2], [width / 2, length / 2, height / 2],
13 | [-width / 2., length / 2, -height / 2], [-width / 2, -length / 2, -height / 2],
14 | [width / 2., -length / 2, -height / 2], [width / 2, length / 2, -height / 2],
15 | ]
16 | return car_model
17 |
18 |
19 | def get_uav_model(uav_radius, tall=0.05):
20 | length = 1.6 * uav_radius
21 | width = 1.1 * uav_radius
22 | uav_model = [[0., length - 0.5 * width, tall], [-0.5 * width, -0.5 * width, tall], [0., -0.2 * width, tall],
23 | [0.5 * width, -0.5 * width, tall],
24 | [0., length - 0.5 * width, -tall], [-0.5 * width, -0.5 * width, -tall], [0., -0.2 * width, -tall],
25 | [0.5 * width, -0.5 * width, -tall]]
26 | return uav_model
27 |
28 |
29 | def get_uav_goal_posture(length=0.9, width=0.6, tall=0.05):
30 | uav_model = [[0., length - 0.5 * width, tall], [-0.5 * width, -0.5 * width, tall], [0., -0.2 * width, tall],
31 | [0.5 * width, -0.5 * width, tall],
32 | [0., length - 0.5 * width, -tall], [-0.5 * width, -0.5 * width, -tall], [0., -0.2 * width, -tall],
33 | [0.5 * width, -0.5 * width, -tall]]
34 | return uav_model
35 |
36 |
37 | def get_cube_model(length=2.0, width=1.5, height=1.0):
38 | cube_model = [[-length / 2., width / 2, height / 2], [-length / 2, -width / 2, height / 2],
39 | [length / 2., -width / 2, height / 2], [length / 2, width / 2, height / 2],
40 | [-length / 2., width / 2, -height / 2], [-length / 2, -width / 2, -height / 2],
41 | [length / 2., -width / 2, -height / 2], [length / 2, width / 2, -height / 2],
42 | ]
43 | return cube_model
44 |
45 |
46 | def get_building_model(long=10.0, width=10.0, tall=10.0):
47 | building_model = [[-long / 2, width / 2, tall], [-long / 2, -width / 2, tall], [long / 2, -width / 2, tall],
48 | [long / 2, width / 2, tall],
49 | [-long / 2, width / 2, 0.0], [-long / 2, -width / 2, 0.0], [long / 2, -width / 2, 0.0],
50 | [long / 2, width / 2, 0.0]]
51 | return building_model
52 |
53 |
54 | def draw_env(ax, buildings_obj_list, keypoints_obj_list, connection_matrix):
55 | pass
56 |
57 |
58 | def draw(origin_pos, objects_list, buildings_obj_list, keypoints_obj_list, connection_matrix):
59 | fig = plt.figure()
60 | plt.xlabel('x (m)')
61 | plt.ylabel('y (m)')
62 | plt.xlim([-500.0, 3500.0])
63 | plt.ylim([-1000.0, 1000.0])
64 | ax = fig.add_subplot(1, 1, 1)
65 |
66 | ax.set_aspect('equal')
67 |
68 | draw_objects(ax, objects_list)
69 | draw_buildings(ax, origin_pos, buildings_obj_list)
70 | draw_roads(ax, keypoints_obj_list, connection_matrix)
71 | plt.show()
72 |
73 |
74 | def draw_buildings(ax, origin_pos, buildings_obj):
75 | for building_obj in buildings_obj:
76 | id = building_obj.id
77 | pos = building_obj.pos
78 | draw_rectangle(ax, origin_pos, pos)
79 |
80 |
81 | def draw_roads(ax, keypoints_obj_list, connection_matrix):
82 | for index_i, info in enumerate(connection_matrix):
83 | self_pos = keypoints_obj_list[index_i].pos
84 | for index_j, distance in enumerate(info):
85 | if distance > 0:
86 | target_pos = keypoints_obj_list[index_j].pos
87 | x = [self_pos[0], target_pos[0]]
88 | y = [self_pos[1], target_pos[1]]
89 | plt.plot(x, y, color='r')
90 | plt.scatter(x, y, color='b')
91 |
92 |
93 | def draw_objects(ax, objects_obj):
94 | for object_obj in objects_obj:
95 | pass
96 |
97 |
98 | def draw_rectangle(ax, origin_pos, pos):
99 | pos_x = pos[0]
100 | pos_y = pos[1]
101 | ax.add_patch(
102 | plt.Rectangle(
103 | (pos_x - 5, pos_y - 5),
104 | 10, # width
105 | 10, # height
106 | color='maroon',
107 | alpha=0.5
108 | ))
109 |
110 |
111 | def draw_agent_3d(ax, pos_global_frame, goal, c_map, heading_global_frame, my_agent_model, a_id, color='blue'):
112 | pos = pos_global_frame
113 | agent_model = my_agent_model
114 | convert_to_actual_model(agent_model, pos_global_frame, heading_global_frame)
115 | num_corner_point_per_layer = int(len(agent_model) / 2)
116 | x_list = []
117 | y_list = []
118 | z_list = []
119 | for layer in range(2):
120 | x_list.clear(), y_list.clear(), z_list.clear()
121 | for i in range(num_corner_point_per_layer):
122 | x_list.append(agent_model[i + layer * num_corner_point_per_layer][0])
123 | y_list.append(agent_model[i + layer * num_corner_point_per_layer][1])
124 | z_list.append(agent_model[i + layer * num_corner_point_per_layer][2])
125 | pannel = [list(zip(x_list, y_list, z_list))]
126 | ax.add_collection3d(Poly3DCollection(pannel, facecolors='goldenrod', alpha=0.9))
127 |
128 | for i in range(num_corner_point_per_layer - 1):
129 | x_list.clear(), y_list.clear(), z_list.clear()
130 | if i == 0:
131 | x_list.append(agent_model[0][0])
132 | x_list.append(agent_model[num_corner_point_per_layer - 1][0])
133 | x_list.append(agent_model[2 * num_corner_point_per_layer - 1][0])
134 | x_list.append(agent_model[num_corner_point_per_layer][0])
135 | y_list.append(agent_model[0][1])
136 | y_list.append(agent_model[num_corner_point_per_layer - 1][1])
137 | y_list.append(agent_model[2 * num_corner_point_per_layer - 1][1])
138 | y_list.append(agent_model[num_corner_point_per_layer][1])
139 | z_list.append(agent_model[0][2])
140 | z_list.append(agent_model[num_corner_point_per_layer - 1][2])
141 | z_list.append(agent_model[2 * num_corner_point_per_layer - 1][2])
142 | z_list.append(agent_model[num_corner_point_per_layer][2])
143 | pannel = [list(zip(x_list, y_list, z_list))]
144 | ax.add_collection3d(Poly3DCollection(pannel, facecolors=color, alpha=0.9))
145 |
146 | x_list.clear(), y_list.clear(), z_list.clear()
147 | x_list.append(agent_model[i][0])
148 | x_list.append(agent_model[i + 1][0])
149 | x_list.append(agent_model[i + 1 + num_corner_point_per_layer][0])
150 | x_list.append(agent_model[i + num_corner_point_per_layer][0])
151 | y_list.append(agent_model[i][1])
152 | y_list.append(agent_model[i + 1][1])
153 | y_list.append(agent_model[i + 1 + num_corner_point_per_layer][1])
154 | y_list.append(agent_model[i + num_corner_point_per_layer][1])
155 | z_list.append(agent_model[i][2])
156 | z_list.append(agent_model[i + 1][2])
157 | z_list.append(agent_model[i + 1 + num_corner_point_per_layer][2])
158 | z_list.append(agent_model[i + num_corner_point_per_layer][2])
159 | pannel = [list(zip(x_list, y_list, z_list))]
160 |
161 | ax.text(pos[0] - 0.1, pos[1] - 0.1, pos[2] + 0.3, r'$%s$' % a_id, fontsize=10, fontweight='bold', zorder=3)
162 | ax.plot([goal[0]], [goal[1]], [goal[2]], '*', color=c_map, markersize=10, linewidth=3.0)
163 | ax.add_collection3d(Poly3DCollection(pannel, facecolors=color, alpha=0.9)) # , alpha=0.7
164 |
165 |
166 | def draw_goal_posture(ax, goal, goal_heading_frame, my_agent_model, a_id, color='red'):
167 | agent_model = my_agent_model
168 | convert_to_actual_model(agent_model, goal, goal_heading_frame)
169 | num_corner_point_per_layer = int(len(agent_model) / 2)
170 | x_list = []
171 | y_list = []
172 | z_list = []
173 | for layer in range(2):
174 | x_list.clear(), y_list.clear(), z_list.clear()
175 | for i in range(num_corner_point_per_layer):
176 | x_list.append(agent_model[i + layer * num_corner_point_per_layer][0])
177 | y_list.append(agent_model[i + layer * num_corner_point_per_layer][1])
178 | z_list.append(agent_model[i + layer * num_corner_point_per_layer][2])
179 | pannel = [list(zip(x_list, y_list, z_list))]
180 | ax.add_collection3d(Poly3DCollection(pannel, facecolors='blue', alpha=0.9))
181 |
182 | for i in range(num_corner_point_per_layer - 1):
183 | x_list.clear(), y_list.clear(), z_list.clear()
184 | if i == 0:
185 | x_list.append(agent_model[0][0])
186 | x_list.append(agent_model[num_corner_point_per_layer - 1][0])
187 | x_list.append(agent_model[2 * num_corner_point_per_layer - 1][0])
188 | x_list.append(agent_model[num_corner_point_per_layer][0])
189 | y_list.append(agent_model[0][1])
190 | y_list.append(agent_model[num_corner_point_per_layer - 1][1])
191 | y_list.append(agent_model[2 * num_corner_point_per_layer - 1][1])
192 | y_list.append(agent_model[num_corner_point_per_layer][1])
193 | z_list.append(agent_model[0][2])
194 | z_list.append(agent_model[num_corner_point_per_layer - 1][2])
195 | z_list.append(agent_model[2 * num_corner_point_per_layer - 1][2])
196 | z_list.append(agent_model[num_corner_point_per_layer][2])
197 | pannel = [list(zip(x_list, y_list, z_list))]
198 | ax.add_collection3d(Poly3DCollection(pannel, facecolors=color, alpha=0.9))
199 |
200 | x_list.clear(), y_list.clear(), z_list.clear()
201 | x_list.append(agent_model[i][0])
202 | x_list.append(agent_model[i + 1][0])
203 | x_list.append(agent_model[i + 1 + num_corner_point_per_layer][0])
204 | x_list.append(agent_model[i + num_corner_point_per_layer][0])
205 | y_list.append(agent_model[i][1])
206 | y_list.append(agent_model[i + 1][1])
207 | y_list.append(agent_model[i + 1 + num_corner_point_per_layer][1])
208 | y_list.append(agent_model[i + num_corner_point_per_layer][1])
209 | z_list.append(agent_model[i][2])
210 | z_list.append(agent_model[i + 1][2])
211 | z_list.append(agent_model[i + 1 + num_corner_point_per_layer][2])
212 | z_list.append(agent_model[i + num_corner_point_per_layer][2])
213 | pannel = [list(zip(x_list, y_list, z_list))]
214 | ax.text(goal[0] - 0.1, goal[1] - 0.1, goal[2] + 0.3, r'$%s$' % a_id, fontsize=10, fontweight='bold', zorder=3)
215 | ax.add_collection3d(Poly3DCollection(pannel, facecolors=color, alpha=0.9))
216 |
217 |
218 | def draw_sphere(ax, obstacle_dict):
219 | center = obstacle_dict['position']
220 | radius = obstacle_dict['feature']
221 | u = np.linspace(0, 2 * np.pi, 100)
222 | v = np.linspace(0, np.pi, 100)
223 | x = radius * np.outer(np.cos(u), np.sin(v)) + center[0]
224 | y = radius * np.outer(np.sin(u), np.sin(v)) + center[1]
225 | z = radius * np.outer(np.ones(np.size(u)), np.cos(v)) + center[2]
226 | ax.plot_surface(x, y, z, rstride=4, cstride=8, color='LightGray')
227 |
228 |
229 | def CreateSphere(center, r):
230 | u = np.linspace(0, 2 * np.pi, 30)
231 | v = np.linspace(0, np.pi, 30)
232 | x = np.outer(np.cos(u), np.sin(v))
233 | y = np.outer(np.sin(u), np.sin(v))
234 | z = np.outer(np.ones(np.size(u)), np.cos(v))
235 | x, y, z = r * x + center[0], r * y + center[1], r * z + center[2]
236 | return x, y, z
237 |
238 |
239 | def draw_Spheres(ax, pos, radius, c_map):
240 | (xs, ys, zs) = CreateSphere(pos, radius)
241 | ax.plot_wireframe(xs, ys, zs, alpha=0.15, color=c_map)
242 |
243 |
244 | def draw_agent(ax, pos, goal, vel, radius, c_map, a_id):
245 | draw_Spheres(ax, pos, radius, c_map)
246 | ax.quiver(pos[0], pos[1], pos[2], vel[0], vel[1], vel[2], length=1, color=c_map)
247 | ax.text(pos[0] - 0.1, pos[1] - 0.1, pos[2] + 0.3, r'$%s$' % a_id, fontsize=10, fontweight='bold', zorder=3)
248 | ax.plot([goal[0]], [goal[1]], [goal[2]], '*', color=c_map, markersize=10, linewidth=3.0)
249 |
250 |
251 | def convert_to_actual_model(agent_model, pos_global_frame, heading_global_frame):
252 | alpha = heading_global_frame[0]
253 | beta = heading_global_frame[1]
254 | gamma = heading_global_frame[2]
255 | for point in agent_model:
256 | x = point[0]
257 | y = point[1]
258 | z = point[2]
259 | # Compute pitch pose
260 | r = sqrt(pow(y, 2) + pow(z, 2))
261 | beta_model = atan2(z, y)
262 | beta_ = beta + beta_model
263 | y = r * cos(beta_)
264 | z = r * sin(beta_)
265 | # Compute roll pose
266 | h = sqrt(pow(x, 2) + pow(z, 2))
267 | gama_model = atan2(z, x)
268 | gamma_ = gamma + gama_model
269 | x = h * cos(gamma_)
270 | z = h * sin(gamma_)
271 | # Compute yaw pose
272 | l = sqrt(pow(x, 2) + pow(y, 2))
273 | alpha_model = atan2(y, x)
274 | alpha_ = alpha + alpha_model - np.pi / 2
275 | point[0] = l * cos(alpha_) + pos_global_frame[0]
276 | point[1] = l * sin(alpha_) + pos_global_frame[1]
277 | point[2] = z + pos_global_frame[2]
278 |
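Note: convert_to_actual_model rotates each model vertex in place: pitch about the x-axis (beta), roll about the y-axis (gamma), then yaw about the z-axis (alpha, offset by -pi/2 so the model nose aligns with the heading), followed by translation to the global position. A short sketch, assuming the functions in this file are importable (pose values are illustrative):

    import copy

    model = get_uav_model(uav_radius=0.5, tall=0.1)
    placed = copy.deepcopy(model)                  # keep the template vertices untouched
    convert_to_actual_model(placed, pos_global_frame=[1.0, 2.0, 3.0],
                            heading_global_frame=[0.0, 0.0, 0.0])  # [alpha, beta, gamma]
    # With zero heading, each vertex is rotated by -pi/2 about z and shifted by the position.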
--------------------------------------------------------------------------------