├── Quadcopter_Project.ipynb ├── README.md ├── __pycache__ ├── physics_sim.cpython-36.pyc ├── takeoff.cpython-36.pyc └── task.cpython-36.pyc ├── agents ├── __pycache__ │ ├── actor.cpython-36.pyc │ ├── actor_critic.cpython-36.pyc │ ├── agent.cpython-36.pyc │ ├── critic.cpython-36.pyc │ └── policy_search.cpython-36.pyc ├── agent.py └── policy_search.py ├── data.txt ├── image └── final_result.png ├── physics_sim.py └── task.py /README.md: -------------------------------------------------------------------------------- 1 | # Deep RL Quadcopter Controller 2 | ## Project: Udacity Machine Learning Nanodegree - Reinforcement Learning 3 | ## Overview: 4 | The goal of this project is to train a quadcopter to fly using a deep reinforcement learning algorithm; specifically, it is trained to take off. For the algorithm, we use Deep Deterministic Policy Gradients (DDPG). 5 | #### Final Result: 6 | ![Final Reward-Episode Graph](image/final_result.png) 7 | ## Contents: 8 | The contents of this repository are: 9 | 10 | * Quadcopter_Project.ipynb: This Jupyter Notebook provides part of the code for training the quadcopter and a summary of the implementation and results. 11 | 12 | * task.py: This file defines the task (take-off) and the reward function. 13 | 14 | * physics_sim.py: This file provides a physics simulator for the motion of the quadcopter. 15 | 16 | * agents/agent.py: This file defines the DDPG agent (actor and critic networks, replay buffer, and Ornstein-Uhlenbeck noise). 17 | -------------------------------------------------------------------------------- /__pycache__/physics_sim.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/__pycache__/physics_sim.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/takeoff.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/__pycache__/takeoff.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/task.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/__pycache__/task.cpython-36.pyc -------------------------------------------------------------------------------- /agents/__pycache__/actor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/agents/__pycache__/actor.cpython-36.pyc -------------------------------------------------------------------------------- /agents/__pycache__/actor_critic.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/agents/__pycache__/actor_critic.cpython-36.pyc -------------------------------------------------------------------------------- /agents/__pycache__/agent.cpython-36.pyc: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/agents/__pycache__/agent.cpython-36.pyc -------------------------------------------------------------------------------- /agents/__pycache__/critic.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/agents/__pycache__/critic.cpython-36.pyc -------------------------------------------------------------------------------- /agents/__pycache__/policy_search.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/agents/__pycache__/policy_search.cpython-36.pyc -------------------------------------------------------------------------------- /agents/agent.py: -------------------------------------------------------------------------------- 1 | import random 2 | from collections import namedtuple, deque 3 | 4 | from keras import layers, models, optimizers 5 | from keras import backend as K 6 | 7 | import numpy as np 8 | import copy 9 | 10 | from task import Task 11 | 12 | 13 | class ReplayBuffer: 14 | """Fixed-size buffer to store experience tuples.""" 15 | 16 | def __init__(self, buffer_size, batch_size): 17 | """Initialize a ReplayBuffer object. 18 | Params 19 | ====== 20 | buffer_size: maximum size of buffer 21 | batch_size: size of each training batch 22 | """ 23 | self.memory = deque(maxlen=buffer_size) # internal memory (deque) 24 | self.batch_size = batch_size 25 | self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"]) 26 | 27 | def add(self, state, action, reward, next_state, done): 28 | """Add a new experience to memory.""" 29 | e = self.experience(state, action, reward, next_state, done) 30 | self.memory.append(e) 31 | 32 | def sample(self, batch_size=64): 33 | """Randomly sample a batch of experiences from memory.""" 34 | return random.sample(self.memory, k=self.batch_size) 35 | 36 | def __len__(self): 37 | """Return the current size of internal memory.""" 38 | return len(self.memory) 39 | 40 | 41 | class Actor: 42 | """Actor (Policy) Model.""" 43 | 44 | def __init__(self, state_size, action_size, action_low, action_high): 45 | """Initialize parameters and build model. 
46 | 47 | Params 48 | ====== 49 | state_size (int): Dimension of each state 50 | action_size (int): Dimension of each action 51 | action_low (array): Min value of each action dimension 52 | action_high (array): Max value of each action dimension 53 | """ 54 | self.state_size = state_size 55 | self.action_size = action_size 56 | self.action_low = action_low 57 | self.action_high = action_high 58 | self.action_range = self.action_high - self.action_low 59 | 60 | # Initialize any other variables here 61 | 62 | self.build_model() 63 | 64 | def build_model(self): 65 | """Build an actor (policy) network that maps states -> actions.""" 66 | # Define input layer (states) 67 | states = layers.Input(shape=(self.state_size,), name='states') 68 | 69 | # Add hidden layers 70 | net = layers.Dense(units=32, activation='relu')(states) 71 | net = layers.BatchNormalization()(net) 72 | net = layers.Dense(units=64, activation='relu')(net) 73 | net = layers.BatchNormalization()(net) 74 | net = layers.Dense(units=32, activation='relu')(net) 75 | 76 | # Try different layer sizes, activations, add batch normalization, regularizers, etc. 77 | 78 | # Add final output layer with sigmoid activation 79 | raw_actions = layers.Dense(units=self.action_size, activation='sigmoid', 80 | name='raw_actions')(net) 81 | 82 | # Scale [0, 1] output for each action dimension to proper range 83 | actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low, 84 | name='actions')(raw_actions) 85 | 86 | # Create Keras model 87 | self.model = models.Model(inputs=states, outputs=actions) 88 | 89 | # Define loss function using action value (Q value) gradients 90 | action_gradients = layers.Input(shape=(self.action_size,)) 91 | loss = K.mean(-action_gradients * actions) 92 | 93 | # Incorporate any additional losses here (e.g. from regularizers) 94 | 95 | # Define optimizer and training function 96 | optimizer = optimizers.Adam() 97 | updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss) 98 | self.train_fn = K.function( 99 | inputs=[self.model.input, action_gradients, K.learning_phase()], 100 | outputs=[], 101 | updates=updates_op) 102 | 103 | 104 | class Critic: 105 | """Critic (Value) Model.""" 106 | 107 | def __init__(self, state_size, action_size): 108 | """Initialize parameters and build model. 109 | 110 | Params 111 | ====== 112 | state_size (int): Dimension of each state 113 | action_size (int): Dimension of each action 114 | """ 115 | self.state_size = state_size 116 | self.action_size = action_size 117 | 118 | # Initialize any other variables here 119 | 120 | self.build_model() 121 | 122 | def build_model(self): 123 | """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" 124 | # Define input layers 125 | states = layers.Input(shape=(self.state_size,), name='states') 126 | actions = layers.Input(shape=(self.action_size,), name='actions') 127 | 128 | # Add hidden layer(s) for state pathway 129 | net_states = layers.Dense(units=32, activation='relu')(states) 130 | net_states = layers.Dense(units=64, activation='relu')(net_states) 131 | 132 | # Add hidden layer(s) for action pathway 133 | net_actions = layers.Dense(units=32, activation='relu')(actions) 134 | net_actions = layers.Dense(units=64, activation='relu')(net_actions) 135 | 136 | # Try different layer sizes, activations, add batch normalization, regularizers, etc. 
137 | 138 | # Combine state and action pathways 139 | net = layers.Add()([net_states, net_actions]) 140 | net = layers.Activation('relu')(net) 141 | 142 | # Add more layers to the combined network if needed 143 | 144 | # Add final output layer to produce action values (Q values) 145 | Q_values = layers.Dense(units=1, name='q_values')(net) 146 | 147 | # Create Keras model 148 | self.model = models.Model(inputs=[states, actions], outputs=Q_values) 149 | 150 | # Define optimizer and compile model for training with built-in loss function 151 | optimizer = optimizers.Adam() 152 | self.model.compile(optimizer=optimizer, loss='mse') 153 | 154 | # Compute action gradients (derivative of Q values w.r.t. actions) 155 | action_gradients = K.gradients(Q_values, actions) 156 | 157 | # Define an additional function to fetch action gradients (to be used by actor model) 158 | self.get_action_gradients = K.function( 159 | inputs=[*self.model.input, K.learning_phase()], 160 | outputs=action_gradients) 161 | 162 | 163 | class DDPG(): 164 | """Reinforcement Learning agent that learns using DDPG.""" 165 | def __init__(self, task): 166 | self.task = task 167 | self.state_size = task.state_size 168 | self.action_size = task.action_size 169 | self.action_low = task.action_low 170 | self.action_high = task.action_high 171 | 172 | # Actor (Policy) Model 173 | self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high) 174 | self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high) 175 | 176 | # Critic (Value) Model 177 | self.critic_local = Critic(self.state_size, self.action_size) 178 | self.critic_target = Critic(self.state_size, self.action_size) 179 | 180 | # Initialize target model parameters with local model parameters 181 | self.critic_target.model.set_weights(self.critic_local.model.get_weights()) 182 | self.actor_target.model.set_weights(self.actor_local.model.get_weights()) 183 | 184 | # Noise process 185 | self.exploration_mu = 0 186 | self.exploration_theta = 0.15 187 | self.exploration_sigma = 0.2 188 | self.noise = OUNoise(self.action_size, self.exploration_mu, self.exploration_theta, self.exploration_sigma) 189 | 190 | # Replay memory 191 | self.buffer_size = 100000 192 | self.batch_size = 64 193 | self.memory = ReplayBuffer(self.buffer_size, self.batch_size) 194 | 195 | # Algorithm parameters 196 | self.gamma = 0.99 # discount factor 197 | self.tau = 0.01 # for soft update of target parameters 198 | 199 | self.rewards = [] 200 | self.total_reward = 0 201 | 202 | 203 | # Score tracker and learning parameters - this was added 204 | #self.episode_duration = 0 205 | #self.total_reward = 0 206 | #self.score = None 207 | #self.best_score = -np.inf 208 | 209 | def reset_episode(self): 210 | self.noise.reset() 211 | self.total_reward = 0.0 # this was added 212 | state = self.task.reset() 213 | self.last_state = state 214 | return state 215 | 216 | def step(self, action, reward, next_state, done): 217 | # Save experience / reward 218 | self.memory.add(self.last_state, action, reward, next_state, done) 219 | # Add reward to total_reward 220 | self.total_reward += reward # this was added 221 | 222 | if done: 223 | self.rewards.append(self.total_reward) 224 | 225 | # Roll over last state and action 226 | self.last_state = next_state 227 | 228 | # Learn, if enough samples are available in memory 229 | if len(self.memory) > self.batch_size: 230 | experiences = self.memory.sample() 231 | self.learn(experiences) 232 | 233 | # Roll over last
state and action 234 | self.last_state = next_state 235 | 236 | def act(self, state): 237 | """Returns actions for given state(s) as per current policy.""" 238 | state = np.reshape(state, [-1, self.state_size]) 239 | action = self.actor_local.model.predict(state)[0] 240 | return list(action + self.noise.sample()) # add some noise for exploration 241 | 242 | def learn(self, experiences): 243 | """Update policy and value parameters using given batch of experience tuples.""" 244 | # Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.) 245 | states = np.vstack([e.state for e in experiences if e is not None]) 246 | actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1, self.action_size) 247 | rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1, 1) 248 | dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1, 1) 249 | next_states = np.vstack([e.next_state for e in experiences if e is not None]) 250 | 251 | # Get predicted next-state actions and Q values from target models 252 | # Q_targets_next = critic_target(next_state, actor_target(next_state)) 253 | actions_next = self.actor_target.model.predict_on_batch(next_states) 254 | Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next]) 255 | 256 | # Compute Q targets for current states and train critic model (local) 257 | Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones) 258 | self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets) 259 | 260 | # Train actor model (local) 261 | action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size)) 262 | self.actor_local.train_fn([states, action_gradients, 1]) # custom training function 263 | 264 | # Soft-update target models 265 | self.soft_update(self.critic_local.model, self.critic_target.model) 266 | self.soft_update(self.actor_local.model, self.actor_target.model) 267 | 268 | def soft_update(self, local_model, target_model): 269 | """Soft update model parameters.""" 270 | local_weights = np.array(local_model.get_weights()) 271 | target_weights = np.array(target_model.get_weights()) 272 | 273 | assert len(local_weights) == len(target_weights), "Local and target model parameters must have the same size" 274 | 275 | new_weights = self.tau * local_weights + (1 - self.tau) * target_weights 276 | target_model.set_weights(new_weights) 277 | 278 | 279 | class OUNoise: 280 | """Ornstein-Uhlenbeck process.""" 281 | 282 | def __init__(self, size, mu, theta, sigma): 283 | """Initialize parameters and noise process.""" 284 | self.mu = mu * np.ones(size) 285 | self.theta = theta 286 | self.sigma = sigma 287 | self.reset() 288 | 289 | def reset(self): 290 | """Reset the internal state (= noise) to mean (mu).""" 291 | self.state = copy.copy(self.mu) 292 | 293 | def sample(self): 294 | """Update internal state and return it as a noise sample.""" 295 | x = self.state 296 | dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x)) 297 | self.state = x + dx 298 | return self.state -------------------------------------------------------------------------------- /agents/policy_search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from task import Task 3 | 4 | class PolicySearch_Agent(): 5 | def __init__(self, task): 6 | # Task (environment) information 7 | self.task 
= task 8 | self.state_size = task.state_size 9 | self.action_size = task.action_size 10 | self.action_low = task.action_low 11 | self.action_high = task.action_high 12 | self.action_range = self.action_high - self.action_low 13 | 14 | self.w = np.random.normal( 15 | size=(self.state_size, self.action_size), # weights for simple linear policy: state_space x action_space 16 | scale=(self.action_range / (2 * self.state_size))) # start producing actions in a decent range 17 | 18 | # Score tracker and learning parameters 19 | self.best_w = None 20 | self.best_score = -np.inf 21 | self.noise_scale = 0.1 22 | 23 | # Episode variables 24 | self.reset_episode() 25 | 26 | def reset_episode(self): 27 | self.total_reward = 0.0 28 | self.count = 0 29 | state = self.task.reset() 30 | return state 31 | 32 | def step(self, reward, done): 33 | # Save experience / reward 34 | self.total_reward += reward 35 | self.count += 1 36 | 37 | # Learn, if at end of episode 38 | if done: 39 | self.learn() 40 | 41 | def act(self, state): 42 | # Choose action based on given state and policy 43 | action = np.dot(state, self.w) # simple linear policy 44 | return action 45 | 46 | def learn(self): 47 | # Learn by random policy search, using a reward-based score 48 | self.score = self.total_reward / float(self.count) if self.count else 0.0 49 | if self.score > self.best_score: 50 | self.best_score = self.score 51 | self.best_w = self.w 52 | self.noise_scale = max(0.5 * self.noise_scale, 0.01) 53 | else: 54 | self.w = self.best_w 55 | self.noise_scale = min(2.0 * self.noise_scale, 3.2) 56 | self.w = self.w + self.noise_scale * np.random.normal(size=self.w.shape) # equal noise in all directions 57 | -------------------------------------------------------------------------------- /data.txt: -------------------------------------------------------------------------------- 1 | time,x,y,z,phi,theta,psi,x_velocity,y_velocity,z_velocity,phi_velocity,theta_velocity,psi_velocity,rotor_speed1,rotor_speed2,rotor_speed3,rotor_speed4 2 | 0.06,-4.52189145845e-07,5.65932276309e-07,7.00204869315,0.00048755120112,0.000408561463701,0.0,-3.75190463648e-05,4.60888185186e-05,0.0682875084515,0.0207796611068,0.0177908789754,0.0,425.4173809209783,426.9376696802673,426.37440510572435,428.14560897413827 3 | 0.12000000000000001,-1.36927782605e-05,1.62434247894e-05,7.00649318072,0.00169674736689,0.00157256941177,0.0,-0.000503198187549,0.000585706410029,0.0798610287971,0.0188702211408,0.0224383802848,0.0,407.6121144992345,408.0271543502081,408.04400065902104,407.8735520817571 4 | 0.18,-8.07221073667e-05,8.98726954023e-05,7.01384267145,0.0024996062874,0.00292747315992,0.0,-0.00186592664142,0.00196840104905,0.165112349286,-0.00083207841933,0.0228668652854,0.0,433.1245355603657,433.16061060532587,432.1041779256752,430.4402696330436 5 | 0.23999999999999996,-0.000281386941732,0.0002771786025,7.03392395882,0.00229678157276,0.00442869774336,0.0,-0.00502047226835,0.00425831534955,0.504175923223,-0.00903870104339,0.0290122873563,0.0,506.7618184409068,507.2033095244084,507.66441472568397,507.07538555543016 6 | 0.3,-0.000669022470738,0.000572664113836,7.06468306681,0.00186485746356,0.00628976734101,0.0,-0.0080624820886,0.00554769027309,0.521119219478,-0.00375166421119,0.0347535035213,0.0,409.4612275648897,409.9720037518151,410.0045958747343,410.4740859452812 7 | 
0.36000000000000004,-0.00130347393801,0.000946770638487,7.10086769429,0.00120632192595,0.0083611927037,0.0,-0.0133169933374,0.00688061097212,0.684949117973,-0.0321377526681,0.0340603563185,0.0,456.5980361494005,456.5432085334613,458.5921015603764,456.33217698834267 8 | 0.4200000000000001,-0.00228746666399,0.00137818320979,7.14547966979,6.28294284092,0.0102736164668,0.0,-0.0196893145946,0.00732687859138,0.802039603826,-0.0116601618408,0.0270973302936,0.0,443.815902563154,443.2444846538296,441.2213096719494,442.90830617433517 9 | 0.48000000000000015,-0.00375472396847,0.0018079138096,7.20295654961,6.28238209455,0.0120858806961,0.0,-0.0294466548658,0.00691403110617,1.11360339971,-0.00507796674569,0.0357881641062,0.0,499.8135965508777,500.44668556233097,500.1081188715444,500.5873875794886 10 | 0.5400000000000001,-0.00574847385825,0.00220767862337,7.27022038025,6.28254247799,0.0149831607857,0.0,-0.0372437361724,0.00640798990105,1.12850644106,0.0148623742718,0.0657835588067,0.0,408.7112726568705,411.37716076565516,409.674928751778,411.44391848196443 11 | 0.6000000000000002,-0.00829339742276,0.00258339688545,7.33996729026,0.000792171130492,0.0195545732986,0.0,-0.0480055057147,0.00623332962704,1.19631460175,0.0376835401268,0.0914877893694,0.0,426.9284501773488,429.1203651442851,426.7756764085506,428.71899766818547 12 | 0.6600000000000003,-0.0115576111984,0.00298000643989,7.41274961241,0.00307434768912,0.024807814215,0.0,-0.0612972123988,0.00719625259979,1.22971588803,0.0387324047597,0.0782271893684,0.0,416.7684308635622,415.61478385982497,417.24574250154143,417.338092236789 13 | 0.7200000000000003,-0.0157602186354,0.00348328030515,7.48928518095,0.00504298125579,0.0299551110079,0.0,-0.0792861783349,0.0097993444514,1.32135319868,0.0170529589387,0.0977316261784,0.0,434.9191536821915,436.55559501354446,436.843185514837,435.03226046107227 14 | 0.7800000000000004,-0.0211652489683,0.00418031864922,7.57177203019,0.00581550194154,0.0357469356959,0.0,-0.101478013456,0.0135286943423,1.42806009177,0.0028359164277,0.0940147617128,0.0,440.627524560482,440.3282204997056,441.87150103393594,440.6979372473468 15 | 0.8400000000000004,-0.0281189050819,0.00512945132518,7.66327797804,0.00610416569332,0.0418292031892,0.0,-0.130984054234,0.0181342894339,1.62185559207,0.00849470833325,0.11305319203,0.0,467.3413047597612,468.8309137202451,467.42723564080364,467.8678931144621 16 | 0.9000000000000005,-0.0369872572939,0.00636251211764,7.76633865605,0.00632097165369,0.0486094822149,0.0,-0.165399007672,0.0230105006025,1.81317878217,-0.00857916340661,0.11290747942,0.0,466.8918587308828,466.8911749922215,469.24909149432654,467.92187697494757 17 | 0.9600000000000005,-0.0481172658368,0.00789362732063,7.8816324644,0.00587576394866,0.0554277994174,0.0,-0.206393876842,0.0279670317811,2.02954412671,-0.00519729790028,0.115060941713,0.0,476.5145473627485,476.68966678195227,474.8871747532485,475.1466114293026 18 | 1.0200000000000005,-0.0616904263976,0.00969342318982,8.00669509932,0.00533412553369,0.0623083394327,0.0,-0.246746506366,0.031982024605,2.13896529218,-0.0180678107313,0.113894502888,0.0,444.30522541863013,444.2213291630229,444.79845521074964,443.74333724579685 19 | 1.0800000000000005,-0.0779268499172,0.0117276292734,8.13985157511,0.00411887507907,0.0693537421878,0.0,-0.295218966242,0.0356958848489,2.29921823123,-0.0250304899348,0.123677393954,0.0,459.56643860257617,460.3529696417652,461.25283238055357,460.7026969588813 20 | 
1.1400000000000006,-0.0970390067776,0.0139451847051,8.27987333434,0.00269480977862,0.0773287044153,0.0,-0.342597545149,0.0380794713761,2.36797041663,-0.0212609990497,0.146887391529,0.0,432.97442778249274,434.9389998630532,431.20394974771034,431.52217379032226 21 | 1.2000000000000006,-0.119128130021,0.0162775693003,8.42365601395,0.000998240611398,0.0856803966925,0.0,-0.394525883328,0.0395300244478,2.42458867524,-0.0484747740919,0.115757781878,0.0,430.26522767901935,427.6398092691191,430.5862661147306,428.27750410385977 22 | 1.2600000000000007,-0.145025764855,0.018662933032,8.57698368575,6.28228583294,0.0920981983914,0.0,-0.469535961037,0.0396947510251,2.6856636359,-0.0101693295722,0.0766518603132,0.0,492.2528468630676,489.35876407758815,491.16041169258176,493.99251592536183 23 | 1.3200000000000007,-0.175384098297,0.0210208113267,8.74389622221,6.28165811141,0.096456095143,0.0,-0.542886666208,0.0388302024253,2.87755499293,-0.0110539403882,0.0630555763076,0.0,473.1837185302271,472.1395952566832,473.2769533892084,473.2089955095465 24 | 1.3800000000000008,-0.209996822572,0.0233166491078,8.91978615669,6.28146745716,0.100305405814,0.0,-0.611240587413,0.037657757057,2.98510795054,0.00916809979052,0.0662673920146,0.0,448.09208251886804,448.35607595293806,447.78395310359963,449.4260432588204 25 | 1.4400000000000008,-0.248793340639,0.0255422096249,9.1021247544,6.28171688596,0.104259153552,0.0,-0.682360148082,0.0365720708904,3.09249435947,-0.00845297357649,0.0651424274737,0.0,448.8717485991383,448.7836177094865,450.0461744509125,448.6175952183755 26 | 1.5000000000000009,-0.29209798631,0.0277015928042,9.29238954669,6.28077940246,0.108295018484,0.0,-0.761525266657,0.0353357167034,3.2491559165,-0.0365419946561,0.0712005229075,0.0,464.5006109742256,464.97861770632545,466.2428419647841,464.04309990805734 27 | 1.560000000000001,-0.339780051435,0.0297706694192,9.48778785076,6.27831930927,0.11297927396,0.0,-0.828280329771,0.0334275418316,3.26400574418,-0.0518776394348,0.0891266794201,0.0,420.1501703731038,421.7066654116344,421.9680417316685,420.64098585911364 28 | 1.620000000000001,-0.392143331406,0.0316515784966,9.68918845468,6.27621542485,0.117964563724,0.0,-0.917701294013,0.0289881932202,3.4486955498,-0.0136336852294,0.0669057219211,0.0,475.52333322318844,473.8230250219663,473.15377152179894,476.08825703411316 29 | 1.680000000000001,-0.449561260274,0.0332508487141,9.8981074991,6.27547589101,0.12253597874,0.0,-0.996633166746,0.0242475151123,3.51498023587,-0.00982993775792,0.0902205479376,0.0,438.7668229461983,440.70274671970543,439.277849218749,439.59329246904224 30 | 1.740000000000001,-0.511905850583,0.0345460681933,10.1117483205,6.27452475693,0.128162669456,0.0,-1.08204728954,0.0188562464429,3.60597873976,-0.0319871160692,0.100078712429,0.0,447.57116333557025,448.3801408912954,449.06493201062796,447.2639509629214 31 | 1.8000000000000012,-0.579858646442,0.0354690417138,10.3336348081,6.27313263216,0.133888576067,0.0,-1.18364734976,0.0117316882898,3.78949248552,-0.00975331590583,0.083988442211,0.0,477.4838248603631,476.2616567575593,476.4500387934332,478.146886003369 32 | 1.8600000000000012,-0.65327187828,0.035995255262,10.5608588982,6.27292284224,0.138587815101,0.0,-1.26389661751,0.00577738240852,3.78455117763,0.00674845423062,0.0634925733411,0.0,419.89753125560594,418.1207142731564,419.041661902193,420.4738696292063 33 | 
1.9200000000000013,-0.732219197036,0.0361177802023,10.7923206365,6.27377613292,0.142227762798,0.0,-1.36802825909,-0.00161505395168,3.93022285012,0.0260596642222,0.0543126913119,0.0,468.899409653246,468.1880705692337,465.68749513018173,467.1955980256909 34 | 1.9800000000000013,-0.816797407033,0.035864866861,11.027648087,6.27593892305,0.14546679032,0.0,-1.45154482414,-0.00665254543966,3.91398822573,0.0508820129031,0.0533180083465,0.0,417.85402582445613,417.76943098535867,415.3672508295305,417.5392795081418 35 | 2.0400000000000014,-0.907164774983,0.0353194136495,11.2668714123,6.27968880305,0.14884030768,0.0,-1.56099835722,-0.0111480698121,4.05948355266,0.0790906336951,0.0614822083835,0.0,468.72803206049895,469.36407501109784,467.4552483761752,469.6508563821919 36 | 2.1000000000000014,-1.00425480937,0.0345914904202,11.51530392,0.00118680785373,0.152402407586,0.0,-1.67566571963,-0.0125907526952,4.22086304993,0.0759008119305,0.0547666874281,0.0,474.63158839064783,474.1180925480084,475.4933521029024,475.25331947360286 37 | 2.1600000000000015,-1.10746454714,0.0338705397143,11.7679237004,0.00564336491109,0.156267328944,0.0,-1.76499654742,-0.0110622582286,4.19975599809,0.0708124032402,0.0788655207402,0.0,417.25070913129775,419.35272299277165,419.2209061256079,418.7831500713338 38 | 2.2200000000000015,-1.21678279621,0.0333434463805,12.0235103561,0.00992240526261,0.160837267489,0.0,-1.87937893078,-0.00606493040111,4.31916695533,0.0723080327299,0.0701339304319,0.0,463.1706789622135,462.487699887735,464.8476798035754,464.96853759406423 39 | 2.2800000000000016,-1.33302443789,0.0332048532191,12.2859893696,0.0145605565817,0.165121553623,0.0,-1.99573195372,0.00190426129875,4.42952086069,0.0857749096046,0.0738311994129,0.0,461.9172503214153,462.21245468085004,462.20799612041276,463.2726394810058 40 | 2.3400000000000016,-1.45607830704,0.0336204169719,12.553469557,0.0204938181844,0.170440900701,0.0,-2.10652193374,0.0124778557504,4.48606124665,0.116981787741,0.108356369491,0.0,445.77359475659694,448.59232853042755,445.36774477693274,447.92109450277064 41 | 2.4000000000000017,-1.58678971477,0.0349026010045,12.8294723876,0.0273969988001,0.176897264205,0.0,-2.2511575469,0.0310742423654,4.71279139426,0.110889952357,0.106062915817,0.0,498.85669305895294,498.69797094186595,499.37534783487877,498.94095450952193 42 | 2.4600000000000017,-1.72469505721,0.037204721524,13.1098801405,0.0336979693047,0.183815019261,0.0,-2.34627344044,0.0461583949142,4.63423838892,0.0894499798394,0.129262434245,0.0,404.19996960132426,406.29862626916747,406.782017111096,404.86776916028253 43 | 2.520000000000002,-1.8690795545,0.0406259298834,13.3890412695,0.0398425815124,0.191242570432,0.0,-2.46723559075,0.0684060898598,4.67066881433,0.120354937927,0.109665402331,0.0,445.2844328592607,443.69185554948956,442.12941776184425,444.6773781723011 44 | 2.580000000000002,-2.02093281885,0.0455155581832,13.6706936044,0.0472127544049,0.198291126041,0.0,-2.59516499952,0.0952470351491,4.71721966635,0.127382524645,0.129729588416,0.0,447.1878712842069,448.83151514227404,448.0597401967726,448.64427583273226 45 | 2.640000000000002,-2.18064528804,0.0521647947685,13.9552761763,0.0552187897579,0.20577961134,0.0,-2.72928170318,0.127109286901,4.76826855563,0.143389811952,0.112504031556,0.0,452.925910184844,451.5519133474375,448.6583001175833,449.9722443177611 46 | 
2.700000000000002,-2.34822598574,0.0607913099843,14.2413119024,0.063579046886,0.212188770386,0.0,-2.85734367837,0.161161343546,4.76591177155,0.129676608979,0.0919362661875,0.0,435.90088677079495,434.18850515731936,435.07316130645796,433.94170509415727 47 | 2.760000000000002,-2.52497334873,0.0720117560038,14.5334399896,0.0713489781434,0.218263050582,0.0,-3.0347433841,0.213680189579,4.97032077185,0.129141333803,0.115285857866,0.0,498.4863526997129,500.1973149182538,498.8778476057599,498.8521041618612 48 | 2.820000000000002,-2.71117974112,0.086125848921,14.8317510364,0.0788196932059,0.225328476941,0.0,-3.17277011737,0.257429648373,4.97294532548,0.113126619033,0.122286707914,0.0,439.59830247992545,440.19070499305,441.0058949112679,439.69496591388184 49 | 2.880000000000002,-2.90653440399,0.103243192414,15.1333100813,0.0852736417656,0.232796036873,0.0,-3.33971477225,0.313755369426,5.07801161425,0.0931200073482,0.128480694289,0.0,473.72144304825065,474.21054568579257,473.7045675349863,472.17258328597717 50 | 2.940000000000002,-3.11067824998,0.123360692614,15.435273209,0.0909079365369,0.240511695385,0.0,-3.46577731366,0.357245248303,4.98741597464,0.0954291378084,0.128820336106,0.0,412.0877182094524,412.1342846938212,411.20382094859605,411.4166117995679 51 | 3.000000000000002,-3.32327899751,0.146451869527,15.7348251124,0.0965752725814,0.249033108659,0.0,-3.62164703347,0.412915688348,4.99705471119,0.0924353287265,0.160200168577,0.0,443.0221353211882,445.613339725434,447.19181265750836,446.9553446300704 52 | 3.0600000000000023,-3.54570298082,0.173092360006,16.0360456324,0.10213559803,0.258360409756,0.0,-3.79332428162,0.475542052378,5.04275570929,0.0931411203302,0.143713839122,0.0,459.0672003900363,457.77877804429056,458.8753973610024,458.9383787699335 53 | 3.1200000000000023,-3.77789640614,0.203310199126,16.3372296907,0.107893566982,0.26810166404,0.0,-3.94727517594,0.532139002092,4.99631352824,0.101086139848,0.18523318261,0.0,428.379356957766,431.92069115841036,430.17611548208913,430.85664514581254 54 | 3.1800000000000024,-4.01995965191,0.237189204678,16.6371739491,0.113803161196,0.278875979428,0.0,-4.12244689069,0.597606629444,5.00103325017,0.0927299014325,0.164785623116,0.0,449.7522496893678,448.1232116618138,448.2051723647289,447.53361151207474 55 | 3.2400000000000024,-4.27415497959,0.275656544693,16.9421947605,0.120714476317,0.288559135121,0.0,-4.35156341601,0.685161072074,5.16458646011,0.140812209047,0.153543289223,0.0,500.45338038526086,499.65579594054606,496.8663586095668,500.38873176727134 56 | 3.3000000000000025,-4.53932632968,0.318304077061,17.2469275319,0.128934816893,0.29751089427,0.0,-4.48831839509,0.736970300368,4.99336277893,0.128043596317,0.138670275694,0.0,392.65239853110785,391.293826359302,393.3738755128846,392.20936150903316 57 | 3.3600000000000025,-4.81395568592,0.364625452021,17.5449145897,0.136941736339,0.306465910421,0.0,-4.66677126969,0.807636262578,4.93900320894,0.142505381146,0.16472983864,0.0,432.3549749519561,434.56645428740524,433.68664742731846,434.9166040302843 58 | 3.4200000000000026,-5.09994068972,0.415513970459,17.8410298979,0.146082284247,0.316134063563,0.0,-4.86690020103,0.889295204011,4.93058254289,0.166997401011,0.152765855769,0.0,450.57285904234396,449.6286185556639,450.22295070315306,452.2216254629935 59 | 3.4800000000000026,-5.39729431902,0.471079697059,18.1340316111,0.155833782371,0.325497436147,0.0,-5.04572033328,0.963566848889,4.83569548212,0.151624619838,0.161929246472,0.0,422.584068711497,423.39771282429746,424.1148282444487,422.81850860230605 60 | 
3.5400000000000027,-5.70680459334,0.531812344029,18.4249010117,0.164418145434,0.335236260537,0.0,-5.27209632039,1.06148273308,4.85870233277,0.114531263668,0.163070838132,0.0,464.7796116416402,464.8929035790247,465.0430999496563,462.1451132330149 61 | 3.6000000000000028,-6.02870247937,0.59789137404,18.7130749021,0.171330752215,0.345175998966,0.0,-5.4587125922,1.14157053772,4.74665157444,0.116533163853,0.170391836238,0.0,421.0533028879983,421.714137208022,420.5639332676181,420.75000488656997 62 | 3.660000000000003,-6.36285458876,0.669276171883,18.9968198517,0.178329079202,0.355939440106,0.0,-5.68057025332,1.23832460064,4.71049291153,0.116848991432,0.193077229158,0.0,447.73238360975495,449.60487208645475,448.5456630961971,448.58336151350005 63 | 3.720000000000003,-6.71123870841,0.746897622961,19.2801200264,0.18569594351,0.367381152691,0.0,-5.93313929823,1.34943889021,4.73131779075,0.132574159006,0.185455185763,0.0,470.2233960157208,469.66563986939445,469.30590745310553,470.53775420357033 64 | 3.780000000000003,-7.07372121534,0.830673861992,19.5612237216,0.19406887246,0.37829216232,0.0,-6.1505337778,1.44355070527,4.63796089717,0.150735611312,0.173451610112,0.0,436.4943627560329,435.5238054016094,432.6437007760545,434.18888117572124 65 | 3.840000000000003,-7.45034270156,0.920635438205,19.8387887679,0.203605019833,0.389376710678,0.0,-6.40436089365,1.55568223096,4.61280372177,0.17166686566,0.200980641385,0.0,459.0030248337019,461.21413212384095,459.56659579735793,461.2457183259036 66 | 3.900000000000003,-7.84338707805,1.0179342683,20.1169594902,0.213808570635,0.401076112459,0.0,-6.69795344704,1.68815921219,4.65746032916,0.166637797344,0.179010114169,0.0,487.58744069139885,485.9760028854113,486.3064751560885,485.95453187268424 67 | 3.960000000000003,-8.25357466975,1.12297048131,20.3959394266,0.223706512124,0.412378764628,0.0,-6.97576176258,1.8135311989,4.64015686238,0.161397433475,0.202492578572,0.0,469.2132364164165,471.0649940355384,471.5584465384563,471.1776166293634 68 | 4.020000000000002,-8.68068983957,1.23566841785,20.673733824,0.233644040334,0.424383125791,0.0,-7.26224065183,1.94347931123,4.61779859707,0.172957337818,0.194740872275,0.0,473.5817280592383,473.0214351010622,472.33665266423174,473.25181345053454 69 | 4.080000000000001,-9.12349031739,1.35539163142,20.946240486,0.243488242876,0.43578425177,0.0,-7.49871021585,2.04773423048,4.4648076899,0.133077574936,0.178354348545,0.0,432.2047529712162,430.85711520198595,433.1595890719361,429.81698934016185 70 | 4.14,-9.58121884755,1.48173292339,21.2105253579,0.251570795747,0.446833239861,0.0,-7.75975830637,2.1638891416,4.34347892392,0.137784400373,0.193749716275,0.0,444.3043873840333,445.59773710189023,445.6056171856592,446.00699086740025 71 | 4.199999999999998,-10.0551903364,1.61530244387,21.4680939856,0.259333245378,0.458194464859,0.0,-8.04011686759,2.28861492982,4.24064099836,0.101769231306,0.178687076559,0.0,456.2703407847315,455.09957094996724,455.86257375091714,452.9888341932832 72 | 4.259999999999997,-10.547637564,1.75715485935,21.7220989358,0.264939560387,0.468654385137,0.0,-8.37538848394,2.43969647561,4.22400528503,0.0663244286195,0.16378587955,0.0,489.2526739027041,488.16771236581945,489.0576127145681,486.41573068251773 73 | 4.319999999999996,-11.0585315892,1.90711466221,21.9712812888,0.268711570648,0.478405905134,0.0,-8.65521041011,2.55884252101,4.08088395541,0.0548599003898,0.159880680349,0.0,449.92990443849345,449.6383213994546,448.7000598488152,447.77085166330806 74 | 
4.379999999999995,-11.586272509,2.06414062249,22.2114426405,0.271711317098,0.487487204316,0.0,-8.93696360135,2.67517801556,3.92336303071,0.0378643536639,0.122996820293,0.0,448.06098211603324,445.071722857934,447.45235436916096,446.06765084691654 75 | 4.439999999999993,-12.1314535505,2.22829634568,22.4426117511,0.273755979972,0.494348083689,0.0,-9.23643563115,2.79641265842,3.78110602396,0.0251614735294,0.0851127342881,0.0,456.6688119712956,453.6464498448771,455.18044732490745,454.1628274623358 76 | 4.499999999999992,-12.6943881355,2.39949433004,22.6643909369,0.276152042953,0.499476109782,0.0,-9.52877881775,2.90997185538,3.6106560862,0.0595945642502,0.0861661205939,0.0,446.93837014871286,447.03020341875407,446.0865887753627,448.89121470383554 77 | 4.559999999999991,-13.2759977036,2.577961604,22.8774934353,0.279734919906,0.504261773694,0.0,-9.8587550853,3.03869923812,3.4913661553,0.0599535415797,0.0621568322458,0.0,468.10770254282596,466.23959337152206,469.1599070302815,469.1900899230812 78 | 4.6199999999999894,-13.8758044973,2.76324794339,23.0801868396,0.283426293207,0.50807036898,0.0,-10.1357172436,3.13744884277,3.26454370251,0.0644887999051,0.0659923002672,0.0,431.5609926908776,431.8877887079815,431.0339059314004,431.4199850864795 79 | 4.679999999999988,-14.492733686,2.95460468,23.2697029748,0.28757017572,0.512641232197,0.0,-10.429469911,3.24099536722,3.05188386852,0.0769236761268,0.0912378256109,0.0,437.13087292768904,439.2348190393201,438.23024924155266,439.26666546857604 80 | 4.739999999999987,-15.1299322138,3.15339095916,23.4504744977,0.291859185485,0.518773567234,0.0,-10.8109165612,3.38482366754,2.97175077701,0.0574496751035,0.118109500883,0.0,487.4790195757379,489.49122768690756,490.03043404726765,488.58353036345045 81 | 4.799999999999986,-15.7891995928,3.36025729145,23.6244836997,0.295067466282,0.526064364529,0.0,-11.1653484288,3.51027869837,2.8268040641,0.0440136523272,0.127568977986,0.0,470.09156846934127,470.8366424526158,472.166430771125,471.1300323039644 82 | 4.859999999999984,-16.4703140417,3.5747745116,23.7901591607,0.298122972008,0.533577416165,0.0,-11.5391037458,3.63973027253,2.69370684895,0.0620368526141,0.120048395203,0.0,479.3753109715376,478.81628004628044,479.47026393027465,480.83945866132945 83 | 4.919999999999983,-17.1726411845,3.79628100964,23.9452765788,0.301393809746,0.540663503305,0.0,-11.8727293703,3.74343286759,2.47549003733,0.0319140384217,0.113896065993,0.0,452.9520960980856,452.46908307439776,455.0367005982468,452.61968125390075 84 | 4.979999999999982,-17.8949320234,4.02380088282,24.086632853,0.303185510912,0.547648398243,0.0,-12.2046740505,3.84008732361,2.23513842418,0.0254046160294,0.121024663241,0.0,448.4021655681537,448.9930812747205,448.21725733016217,447.6879747345593 85 | 5.0399999999999805,-18.6365512045,4.25662772867,24.2120578262,0.305475809994,0.554722921445,0.0,-12.5172127256,3.92039812138,1.94472691029,0.0559340776622,0.110814639798,0.0,432.74766747471404,431.8998624800584,430.4172714516381,432.9950869466183 86 | 5.099999999999979,-19.3982608792,4.49471933228,24.3215202342,0.308915375145,0.561738542279,0.0,-12.8741483567,4.0154827692,1.70247670592,0.0599730408047,0.126967371455,0.0,452.78340369816084,454.093355743747,453.741277321829,454.06772233720335 87 | 5.159999999999978,-20.1816549518,4.73843730874,24.4162091699,0.313418962269,0.569211959327,0.0,-13.2400331278,4.10791066338,1.4521073322,0.0949977982559,0.119244278577,0.0,455.75278926735814,455.1484886484652,452.6515633959164,455.46649135500155 88 | 
5.219999999999977,-20.9859339882,4.98697825131,24.493625065,0.31884727824,0.57642573984,0.0,-13.5706213164,4.17640803902,1.12722071848,0.0794002015689,0.12212922554,0.0,430.4498956429034,430.7076026693215,429.4829246555384,428.1644538927969 89 | 5.279999999999975,-21.8126140975,5.24064798975,24.5546293427,0.323647281973,0.584405956432,0.0,-13.9862763331,4.27851419277,0.903913245048,0.0811728704454,0.148803864149,0.0,472.07428904727976,474.1435621319983,472.92189733313427,473.06318552737645 90 | 5.339999999999974,-22.6630343243,5.49962454301,24.5997744073,0.328630677825,0.593738116679,0.0,-14.3622559762,4.35335868331,0.598996218388,0.0865782412424,0.166403858638,0.0,445.7727731786365,447.2317618301886,448.72116896579763,449.1654735808098 91 | 5.399999999999973,-23.534870444,5.76228730323,24.6242890364,0.333645528998,0.603885920964,0.0,-14.7004261342,4.40145772056,0.216676672273,0.0767922972308,0.174083733036,0.0,419.93268200044497,420.62778191679007,422.5459976375481,421.7071932701315 92 | 5.4599999999999715,-24.4293688333,6.02868674276,24.6283780413,0.33818995238,0.614916635609,0.0,-15.1172367098,4.47755084876,-0.0829599098399,0.0735502369758,0.19841145412,0.0,458.54074929321234,460.5010454492472,460.186884370219,459.9343683227174 93 | 5.51999999999997,-25.3501564494,6.29994763999,24.6153726159,0.343148616933,0.627048243677,0.0,-15.5764675364,4.5632590544,-0.353842997584,0.0964523138446,0.208839457129,0.0,478.40355412702013,479.2339649529759,476.76252411633163,478.514726426607 94 | 5.579999999999969,-26.2972924689,6.57546999557,24.5836768636,0.348969714474,0.640399997308,0.0,-15.9957280488,4.61968823126,-0.705566969251,0.0981260446736,0.241162204189,0.0,453.03815793314396,455.6774081045489,455.75675138146835,455.89835048588 95 | 5.639999999999968,-27.2691200871,6.85383698416,24.5294490306,0.354785378814,0.65450066611,0.0,-16.3995694388,4.6580203377,-1.10485134634,0.0944184871594,0.218434721072,0.0,445.3533086638758,443.54404532033357,443.09081824768907,442.7938187911824 96 | 5.699999999999966,-28.2640039245,7.13362942125,24.4490002465,0.360594614279,0.667079274768,0.0,-16.7647147361,4.66733512797,-1.57893803441,0.1012341711,0.179425046043,0.0,419.71190414981896,416.35532215738186,419.536329455171,420.1365903844573 97 | 5.759999999999965,-29.2827697871,7.414505673,24.3416305695,0.366783113157,0.677979010845,0.0,-17.195476419,4.6939289008,-2.00285232023,0.106704242408,0.185792374972,0.0,448.1019890423402,448.6511035546932,446.8119274922587,447.26703649656605 98 | 5.819999999999964,-30.3295712481,7.69756478965,24.2104624976,0.373844904574,0.689321323685,0.0,-17.6987456839,4.73974472777,-2.37322401986,0.13362495107,0.194839268009,0.0,477.7802671495539,478.50172762415093,477.9547505051865,480.01397700132344 99 | 5.879999999999963,-31.4065493325,7.98295272818,24.0560796267,0.381606591095,0.700624670337,0.0,-18.2013811451,4.77146362405,-2.77675317539,0.119082181793,0.170630067367,0.0,476.67166399191575,474.8485669726207,476.2269613546244,475.1267346569484 100 | 5.939999999999961,-32.5131711332,8.26957807681,23.8759330243,0.388940838301,0.710808496571,0.0,-18.6872994703,4.78100266007,-3.23163609628,0.127894958013,0.167870317149,0.0,465.64483786058804,465.4549564937331,463.78749670462065,464.4922732294994 101 | 5.99999999999996,-33.6498058708,8.5566804883,23.6684088282,0.396961956794,0.721109264691,0.0,-19.2017052757,4.78715329769,-3.68972837043,0.143281861292,0.178369199007,0.0,474.11164146740595,474.9441380684188,472.6207241126396,473.8208759730335 102 | 
6.059999999999959,-34.8155129749,8.84285724506,23.4304773771,0.405784452602,0.732423155386,0.0,-19.6568632579,4.75019459388,-4.24461802721,0.153655826785,0.203617378712,0.0,441.88066800653763,443.99230545686754,441.7693022990775,442.644640803729 103 | 6.1199999999999575,-36.0087888803,9.12638916432,23.1584935035,0.415323114846,0.744970962702,0.0,-20.1204974301,4.69873279111,-4.82513645407,0.16791461789,0.218333539087,0.0,441.47251608283926,442.72881410881627,440.423839848591,441.62626980810103 104 | 6.179999999999956,-37.2292551389,9.40597907408,22.8500145747,0.425526666565,0.757629581073,0.0,-20.563386278,4.61866128399,-5.4609865483,0.174030800185,0.189248883122,0.0,428.22695401645296,425.7891402782711,426.6000726863585,427.15129940579294 105 | 6.239999999999955,-38.4814736695,9.68239582153,22.507289695,0.435996181396,0.769383243034,0.0,-21.1778117879,4.59218733157,-5.96867066168,0.175397255403,0.206644561935,0.0,496.9757324826675,498.28246654838836,497.8239744117287,497.94970307368004 106 | 6.299999999999954,-39.768897719,9.95581419285,22.1313243721,0.446762926989,0.781458577134,0.0,-21.7373787066,4.51865775354,-6.5684955049,0.186503707161,0.18741907674,0.0,473.9298104074963,472.4856330008013,472.86157953431905,473.74568446366436 107 | 6.359999999999952,-41.089867922,10.2240642205,21.7181216629,0.457706958248,0.792526900095,0.0,-22.2964127526,4.41974226005,-7.20981877324,0.1725965563,0.177820050457,0.0,470.0061276200216,469.29206477655634,469.9713108788579,468.9216224394298 108 | 6.419999999999951,-42.4438330971,10.4853250972,21.264680422,0.468553755835,0.803288478838,0.0,-22.8377615165,4.2855542426,-7.90958714394,0.193488492085,0.182269400022,0.0,457.5610518877172,457.94535932783197,456.0190244891861,457.7160287929873 109 | 6.47999999999995,-43.8286268726,10.7369217902,20.7665201136,0.480583334261,0.813803901986,0.0,-23.3247590927,4.09750378037,-8.69988661707,0.21171347405,0.155113203905,0.0,428.1439982077859,425.8559682562582,426.28805494553444,427.8843573987843 110 | 6.5399999999999485,-45.2453855727,10.9774466961,20.2218976198,0.493462955007,0.823405706431,0.0,-23.9029901408,3.91585962384,-9.45927434081,0.21997526172,0.168384363568,0.0,457.4620861453787,458.54162229838676,458.46997234806935,459.17014142222854 111 | 6.599999999999947,-46.6970969668,11.2060966827,19.6304648917,0.506866402127,0.833775036143,0.0,-24.4898725326,3.70119223526,-10.2605268211,0.229460143406,0.180469112816,0.0,454.56869100424274,455.56447399262237,454.77793919561356,455.5855626609819 112 | 6.659999999999946,-48.1848166578,11.421023719,18.9900748248,0.520490269199,0.845333584318,0.0,-25.1030224811,3.45780089693,-11.0917484119,0.221793067272,0.209792379955,0.0,458.46624222257276,460.8256151841734,459.1495530610448,458.58986580856015 113 | 6.719999999999945,-49.7078235202,11.6193879159,18.2970886214,0.534590067254,0.857911865611,0.0,-25.6665369414,3.14874909014,-12.0135030913,0.253162121599,0.209328059608,0.0,432.2914841727009,432.29695293110376,430.54693095853077,433.25006569256243 114 | 6.779999999999943,-51.2691133857,11.7997573019,17.5505270421,0.549787358208,0.870226833431,0.0,-26.3778457871,2.85667629605,-12.8793614645,0.253538580506,0.195519134622,0.0,486.12508747552204,485.12587949113174,484.65628622224546,484.7431565559874 115 | 6.839999999999942,-52.873086049,11.9610286452,16.7502631548,0.565056038524,0.881552875725,0.0,-27.0899968882,2.51162872756,-13.8036152428,0.25628979123,0.169762382732,0.0,482.48217587619786,480.5634180081794,480.65986284753177,480.92810394246544 116 | 
6.899999999999941,-54.5204832847,12.1001470048,15.8931600481,0.581266574317,0.8913428362,0.0,-27.8261626715,2.11757691167,-14.7741465984,0.288987717275,0.144804971939,0.0,484.6987709238556,482.8407576229112,482.50490541797905,485.0344038906214 117 | 6.95999999999994,-56.2103091119,12.2128641926,14.9746656969,0.598501112665,0.900012378407,0.0,-28.5061194833,1.63112830857,-15.84919499,0.283510496392,0.14386048514,0.0,452.71686586220125,452.65988644568125,453.3213961537589,452.962718381477 118 | 7.019999999999938,-57.9438534959,12.2954587709,13.9914885938,0.615281247347,0.908262747736,0.0,-29.2833124058,1.11223817834,-16.931502911,0.270622033955,0.120117313838,0.0,478.2908917999423,476.4936101338386,478.3376554376629,477.4274852116085 119 | -------------------------------------------------------------------------------- /image/final_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pfoy/Quadcopter-Deep-Reinforcement-Learning/0f7ad0200db1fc87729ce65ab95ed659c7929bca/image/final_result.png -------------------------------------------------------------------------------- /physics_sim.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import csv 3 | 4 | 5 | def C(x): 6 | return np.cos(x) 7 | 8 | 9 | def S(x): 10 | return np.sin(x) 11 | 12 | 13 | def earth_to_body_frame(ii, jj, kk): 14 | # C^b_n 15 | R = [[C(kk) * C(jj), C(kk) * S(jj) * S(ii) - S(kk) * C(ii), C(kk) * S(jj) * C(ii) + S(kk) * S(ii)], 16 | [S(kk) * C(jj), S(kk) * S(jj) * S(ii) + C(kk) * C(ii), S(kk) * S(jj) * C(ii) - C(kk) * S(ii)], 17 | [-S(jj), C(jj) * S(ii), C(jj) * C(ii)]] 18 | return np.array(R) 19 | 20 | 21 | def body_to_earth_frame(ii, jj, kk): 22 | # C^n_b 23 | return np.transpose(earth_to_body_frame(ii, jj, kk)) 24 | 25 | 26 | class PhysicsSim(): 27 | def __init__(self, init_pose=None, init_velocities=None, init_angle_velocities=None, runtime=5.): 28 | self.init_pose = init_pose 29 | self.init_velocities = init_velocities 30 | self.init_angle_velocities = init_angle_velocities 31 | self.runtime = runtime 32 | 33 | self.gravity = -9.81 # m/s 34 | self.rho = 1.2 35 | self.mass = 0.958 # 300 g 36 | self.dt = 1 / 50.0 # Timestep 37 | self.C_d = 0.3 38 | self.l_to_rotor = 0.4 39 | self.propeller_size = 0.1 40 | width, length, height = .51, .51, .235 41 | self.dims = np.array([width, length, height]) # x, y, z dimensions of quadcopter 42 | self.areas = np.array([length * height, width * height, width * length]) 43 | I_x = 1 / 12. * self.mass * (height**2 + width**2) 44 | I_y = 1 / 12. * self.mass * (height**2 + length**2) # 0.0112 was a measured value 45 | I_z = 1 / 12. 
* self.mass * (width**2 + length**2) 46 | self.moments_of_inertia = np.array([I_x, I_y, I_z]) # moments of inertia 47 | 48 | env_bounds = 300.0 # 300 m / 300 m / 300 m 49 | self.lower_bounds = np.array([-env_bounds / 2, -env_bounds / 2, 0]) 50 | self.upper_bounds = np.array([env_bounds / 2, env_bounds / 2, env_bounds]) 51 | 52 | self.reset() 53 | 54 | def reset(self): 55 | self.time = 0.0 56 | self.pose = np.array([0.0, 0.0, 10.0, 0.0, 0.0, 0.0]) if self.init_pose is None else np.copy(self.init_pose) 57 | self.v = np.array([0.0, 0.0, 0.0]) if self.init_velocities is None else np.copy(self.init_velocities) 58 | self.angular_v = np.array([0.0, 0.0, 0.0]) if self.init_angle_velocities is None else np.copy(self.init_angle_velocities) 59 | self.linear_accel = np.array([0.0, 0.0, 0.0]) 60 | self.angular_accels = np.array([0.0, 0.0, 0.0]) 61 | self.prop_wind_speed = np.array([0., 0., 0., 0.]) 62 | self.done = False 63 | 64 | def find_body_velocity(self): 65 | body_velocity = np.matmul(earth_to_body_frame(*list(self.pose[3:])), self.v) 66 | return body_velocity 67 | 68 | def get_linear_drag(self): 69 | linear_drag = 0.5 * self.rho * self.find_body_velocity()**2 * self.areas * self.C_d 70 | return linear_drag 71 | 72 | def get_linear_forces(self, thrusts): 73 | # Gravity 74 | gravity_force = self.mass * self.gravity * np.array([0, 0, 1]) 75 | # Thrust 76 | thrust_body_force = np.array([0, 0, sum(thrusts)]) 77 | # Drag 78 | drag_body_force = -self.get_linear_drag() 79 | body_forces = thrust_body_force + drag_body_force 80 | 81 | linear_forces = np.matmul(body_to_earth_frame(*list(self.pose[3:])), body_forces) 82 | linear_forces += gravity_force 83 | return linear_forces 84 | 85 | def get_moments(self, thrusts): 86 | thrust_moment = np.array([(thrusts[3] - thrusts[2]) * self.l_to_rotor, 87 | (thrusts[1] - thrusts[0]) * self.l_to_rotor, 88 | 0])# (thrusts[2] + thrusts[3] - thrusts[0] - thrusts[1]) * self.T_q]) # Moment from thrust 89 | 90 | drag_moment = self.C_d * 0.5 * self.rho * self.angular_v * np.absolute(self.angular_v) * self.areas * self.dims * self.dims 91 | moments = thrust_moment - drag_moment # + motor_inertia_moment 92 | return moments 93 | 94 | def calc_prop_wind_speed(self): 95 | body_velocity = self.find_body_velocity() 96 | phi_dot, theta_dot = self.angular_v[0], self.angular_v[1] 97 | s_0 = np.array([0., 0., theta_dot * self.l_to_rotor]) 98 | s_1 = -s_0 99 | s_2 = np.array([0., 0., phi_dot * self.l_to_rotor]) 100 | s_3 = -s_2 101 | speeds = [s_0, s_1, s_2, s_3] 102 | for num in range(4): 103 | perpendicular_speed = speeds[num] + body_velocity 104 | self.prop_wind_speed[num] = perpendicular_speed[2] 105 | 106 | def get_propeler_thrust(self, rotor_speeds): 107 | '''calculates net thrust (thrust - drag) based on velocity 108 | of propeller and incoming power''' 109 | thrusts = [] 110 | for prop_number in range(4): 111 | V = self.prop_wind_speed[prop_number] 112 | D = self.propeller_size 113 | n = rotor_speeds[prop_number] 114 | J = V / n * D 115 | # From http://m-selig.ae.illinois.edu/pubs/BrandtSelig-2011-AIAA-2011-1255-LRN-Propellers.pdf 116 | C_T = max(.12 - .07*max(0, J)-.1*max(0, J)**2, 0) 117 | thrusts.append(C_T * self.rho * n**2 * D**4) 118 | return thrusts 119 | 120 | def next_timestep(self, rotor_speeds): 121 | self.calc_prop_wind_speed() 122 | thrusts = self.get_propeler_thrust(rotor_speeds) 123 | self.linear_accel = self.get_linear_forces(thrusts) / self.mass 124 | 125 | position = self.pose[:3] + self.v * self.dt + 0.5 * self.linear_accel * self.dt**2 126 | self.v += 
self.linear_accel * self.dt 127 | 128 | moments = self.get_moments(thrusts) 129 | 130 | self.angular_accels = moments / self.moments_of_inertia 131 | angles = self.pose[3:] + self.angular_v * self.dt + 0.5 * self.angular_accels * self.angular_accels * self.dt**2 132 | angles = (angles + 2 * np.pi) % (2 * np.pi) 133 | self.angular_v = self.angular_v + self.angular_accels * self.dt 134 | 135 | new_positions = [] 136 | for ii in range(3): 137 | if position[ii] <= self.lower_bounds[ii]: 138 | new_positions.append(self.lower_bounds[ii]) 139 | self.done = True 140 | elif position[ii] > self.upper_bounds[ii]: 141 | new_positions.append(self.upper_bounds[ii]) 142 | self.done = True 143 | else: 144 | new_positions.append(position[ii]) 145 | 146 | self.pose = np.array(new_positions + list(angles)) 147 | self.time += self.dt 148 | if self.time > self.runtime: 149 | self.done = True 150 | return self.done -------------------------------------------------------------------------------- /task.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from physics_sim import PhysicsSim 3 | 4 | class Task(): 5 | """Task (environment) that defines the goal and provides feedback to the agent. 6 | The task to be learned is how to take off. 7 | The quadcopter has an initial position of (0, 0, 0) and a target position of (0, 0, 20). 8 | """ 9 | 10 | def __init__(self, init_pose=None, init_velocities=None, 11 | init_angle_velocities=None, runtime=5., target_pos=None): 12 | """Initialize a Task object. 13 | Params 14 | ====== 15 | init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles 16 | init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions 17 | init_angle_velocities: initial radians/second for each of the three Euler angles 18 | runtime: time limit for each episode 19 | target_pos: target/goal (x,y,z) position for the agent 20 | """ 21 | # Simulation 22 | self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) 23 | self.action_repeat = 3 24 | 25 | self.state_size = self.action_repeat * 6 26 | self.action_low = 0 27 | self.action_high = 900 28 | self.action_size = 4 29 | 30 | # Goal 31 | self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.]) 32 | 33 | def get_reward(self): 34 | """Uses current pose of sim to return reward.""" 35 | # this is the original, commented out 36 | # reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum() 37 | 38 | # new reward function, start reward at 0 39 | reward = 0. 40 | # calculate the coordinate distance from the target position 41 | dist_x = abs(self.sim.pose[0] - self.target_pos[0]) 42 | dist_y = abs(self.sim.pose[1] - self.target_pos[1]) 43 | dist_z = abs(self.sim.pose[2] - self.target_pos[2]) 44 | # create penalty: 0.3 times the Euclidean distance from the target 45 | penalty = 0.3*(np.sqrt((dist_x**2) + (dist_y**2) + (dist_z**2))) 46 | # add bonus 47 | bonus = 10.
48 | # calculate reward 49 | reward = reward + bonus - penalty 50 | return reward 51 | 52 | 53 | 54 | def step(self, rotor_speeds): 55 | """Uses action to obtain next state, reward, done.""" 56 | reward = 0 57 | pose_all = [] 58 | for _ in range(self.action_repeat): 59 | done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities 60 | reward += self.get_reward() 61 | pose_all.append(self.sim.pose) 62 | next_state = np.concatenate(pose_all) 63 | return next_state, reward, done 64 | 65 | def reset(self): 66 | """Reset the sim to start a new episode.""" 67 | self.sim.reset() 68 | state = np.concatenate([self.sim.pose] * self.action_repeat) 69 | return state --------------------------------------------------------------------------------
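The training driver itself lives in Quadcopter_Project.ipynb and is not reproduced in this dump. The sketch below shows one minimal way the pieces above might be wired together: a `Task` built around `PhysicsSim`, a `DDPG` agent, and an episode loop that feeds rotor speeds to the simulator and experience tuples back to the agent. It is a sketch under stated assumptions, not the notebook's actual code: the episode count is illustrative, the (0, 0, 20) take-off target is taken from the `Task` docstring rather than the notebook, and the import path assumes the repository root is on the Python path.

```python
# Minimal training-loop sketch (assumed wiring; the actual driver is in
# Quadcopter_Project.ipynb, which is not included in this dump).
import numpy as np

from task import Task
from agents.agent import DDPG

num_episodes = 500                       # illustrative value, not taken from the notebook
target_pos = np.array([0., 0., 20.])     # take-off target from the Task docstring
task = Task(target_pos=target_pos)       # default initial pose/velocities from PhysicsSim
agent = DDPG(task)

for i_episode in range(1, num_episodes + 1):
    state = agent.reset_episode()        # resets the sim, the OU noise, and the reward tally
    while True:
        rotor_speeds = agent.act(state)  # policy output plus exploration noise
        next_state, reward, done = task.step(rotor_speeds)
        agent.step(rotor_speeds, reward, next_state, done)
        state = next_state
        if done:
            break
    print("Episode {:4d}, total reward: {:8.3f}".format(i_episode, agent.total_reward))

# agent.rewards now holds one total reward per episode; plotting it gives a
# reward-vs-episode curve comparable to image/final_result.png.
```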