├── CMakeLists.txt
├── README.md
├── launch
│   └── rl_controller.launch
├── notebooks
│   ├── Experimentation.ipynb
│   ├── RL-Quadcopter.html
│   ├── RL-Quadcopter.ipynb
│   └── images
│       ├── quadcopter_tumble.mp4
│       └── quadcopter_tumble.png
├── package.xml
├── scripts
│   ├── drone_sim
│   └── rl_controller_node
├── setup.py
├── sim
│   └── README
├── src
│   └── quad_controller_rl
│       ├── agents
│       │   ├── __init__.py
│       │   ├── base_agent.py
│       │   └── policy_gradients.py
│       └── tasks
│           ├── __init__.py
│           ├── base_task.py
│           ├── hover.py
│           ├── landing.py
│           └── takeoff.py
└── srv
    └── SetPose.srv
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(quad_controller_rl)
3 |
4 | ###########################
5 | ## General configuration ##
6 | ###########################
7 |
8 | ## Add support for C++11, supported in ROS Kinetic and newer
9 | # add_definitions(-std=c++11)
10 |
11 | ##################
12 | ## Catkin Tools ##
13 | ##################
14 |
15 | ## Find catkin macros and libraries
16 | find_package(catkin REQUIRED COMPONENTS
17 | message_generation
18 | std_msgs
19 | geometry_msgs
20 | std_srvs
21 | )
22 |
23 | ##################
24 | ## Python setup ##
25 | ##################
26 |
27 | ## Uncomment this if the package has a setup.py. This macro ensures
28 | ## modules and global scripts declared therein get installed
29 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
30 | catkin_python_setup()
31 |
32 | ################################################
33 | ## Declare ROS messages, services and actions ##
34 | ################################################
35 |
36 | ## Add messages to generate, defined in the 'msg' folder
37 | # add_message_files(
38 | # FILES
39 | # EulerAngles.msg
40 | # )
41 |
42 | ## Add services to generate, defined in the 'srv' folder
43 | add_service_files(
44 | FILES
45 | SetPose.srv
46 | )
47 |
48 | ## Add actions to generate, defined in the 'action' folder
49 | # add_action_files(
50 | # FILES
51 | # Action1.action
52 | # )
53 |
54 | ## Generate added messages and services, with any dependencies listed here
55 | generate_messages(
56 | DEPENDENCIES
57 | geometry_msgs
58 | std_srvs
59 | )
60 |
61 | ################################################
62 | ## Declare ROS dynamic reconfigure parameters ##
63 | ################################################
64 |
65 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
66 | # generate_dynamic_reconfigure_options(
67 | # cfg/params.cfg
68 | # )
69 |
70 | #################################
71 | ## Generate CMake config files ##
72 | #################################
73 | ## Declare things to be passed to dependent projects
74 | ## INCLUDE_DIRS: uncomment this if your package contains header files
75 | ## LIBRARIES: libraries you create in this project that dependent projects also need
76 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
77 | ## DEPENDS: system dependencies of this project that dependent projects also need
78 | catkin_package(
79 | CATKIN_DEPENDS message_runtime
80 | std_msgs
81 | geometry_msgs
82 | std_srvs
83 | )
84 |
85 | ###########
86 | ## Build ##
87 | ###########
88 |
89 | ## Specify additional header file locations
90 | ## Declare C++ libraries/executables
91 | ## Add dependencies/links for above libraries/executables
92 |
93 | #############
94 | ## Install ##
95 | #############
96 |
97 | ## Note: All install targets should use catkin DESTINATION variables
98 | ## See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
99 |
100 | #############
101 | ## Testing ##
102 | #############
103 |
104 | ## Add folders to be run by python nosetests
105 | # catkin_add_nosetests(tests)
106 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Drone Flight Controller
2 |
3 | ## Overview
4 |
5 | This is the code for [this](https://youtu.be/PngA5YLFuvU) video on YouTube by Siraj Raval on Deep Deterministic Policy Gradients. It's part of week 9 of 10 of the Move 37 course at School of AI.
6 |
7 | # Table of Contents
8 |
9 | - [Install](#install)
10 | - [Download](#download)
11 | - [Develop](#develop)
12 | - [Credits](#credits)
13 |
14 |
15 | # Install
16 |
17 | This project uses ROS (Robot Operating System) as the primary communication mechanism between your agent and the simulation. You can either install it on your own machine ("native install") or run it inside the Virtual Machine described below.
18 |
19 | ## ROS Virtual Machine
20 |
21 | Download the compressed VM disk image and unzip it:
22 |
23 | - Compressed VM Disk Image: [RoboVM_V2.1.0.zip](https://s3-us-west-1.amazonaws.com/udacity-robotics/Virtual+Machines/Lubuntu_071917/RoboVM_V2.1.0.zip)
24 | - MD5 checksum: `MD5(Ubuntu 64-bit Robo V2.1.0.ova)= 95bfba89fbdac5f2c0a2be2ae186ddbb`
25 |
26 | You will need a Virtual Machine player to run the VM, such as VMWare or VirtualBox:
27 |
28 | - [VMWare](http://www.vmware.com/): If you use a Windows/Linux system, you can get [Workstation Player](https://www.vmware.com/products/workstation-player.html) for free, or if you're on a Mac, you can get a trial of [Fusion](https://www.vmware.com/products/fusion.html).
29 | - [VirtualBox](https://www.virtualbox.org/): Download and install the appropriate version for your system.
30 |
31 | Open your VM player, and then "Open" / "Import" the VM disk image that you just unzipped (the `.ova` file).
32 |
33 | Configure the settings for your VM to allocate at least 2 processors and 4GB of RAM (the more the merrier!). Now launch the VM, and follow the on-screen instructions for one-time setup steps.
34 |
35 | - Username: `robond`
36 | - Password: `robo-nd`
37 |
38 | To open a terminal in your VM, press `Ctrl+Alt+T`. If prompted "Do you want to source ROS?", answer `y` (yes). This is where you will execute your project code.
39 |
40 | ## ROS Native Install
41 |
42 | If you choose to install ROS (Robot Operating System) on your own machine, it is recommended that you use Ubuntu 16.04 LTS as your operating system. To install ROS, please follow the instructions here: [ROS Installation](http://wiki.ros.org/kinetic/Installation)
43 |
44 | # Download
45 |
46 | ## Project Code
47 |
48 | On the machine where you have installed ROS (a VM, or your local machine), create a directory named `catkin_ws`, and inside it create a subdirectory named `src`. If you're using a VM, you can also share a folder on your file-system between the host and VM. That might make it easier for you to prepare your report and submit your project for review.
49 |
50 | Now clone this repository or download it inside the `src` directory. This is where you will develop your project code. Your folder structure should look like the following (ROS has a fairly complicated build system, as you will see!):
51 |
52 | ```
53 | - ~/catkin_ws/
54 |   - src/
55 |     - RL-Quadcopter/
56 |       - quad_controller_rl/
57 |       - ...
58 | ```
59 |
60 | The root of this structure (`catkin_ws`) is a [catkin workspace](http://wiki.ros.org/catkin/workspaces), which you can use to organize and work on all your ROS-based projects (the name `catkin_ws` is not mandatory - you can change it to anything you want).
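61 |
62 | For example, the download steps above boil down to the following (the clone URL is inferred from this repository's asset links; the target folder name matches the structure shown above):
63 |
64 | ```bash
65 | $ mkdir -p ~/catkin_ws/src
66 | $ cd ~/catkin_ws/src
67 | $ git clone https://github.com/llSourcell/Drone_Flight_Controller.git RL-Quadcopter
68 | ```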
61 |
62 | ## Simulator
63 |
64 | Download the Udacity Quadcopter Simulator, nicknamed **DroneSim**, for your host computer OS [here](https://github.com/udacity/RoboND-Controls-Lab/releases).
65 |
66 | To start the simulator, simply run the downloaded executable file. You may need to run the simulator _after_ the `roslaunch` step mentioned below in the Run section, so that it can connect to a running ROS master.
67 |
68 | _Note: If you are using a Virtual Machine (VM), you cannot run the simulator inside the VM. You have to download and run the simulator for your **host operating system** and connect it to your VM (see below)._
69 |
70 | ### Connecting the Simulator to a VM
71 |
72 | If you are running ROS in a VM, there are a couple of steps necessary to make sure it can communicate with the simulator running on your host system. If not using a VM, these steps are not needed.
73 |
74 | #### Enable Networking on VM
75 |
76 | - **VMWare**: The default setting should work. To verify, with the VM running, go to the Virtual Machine menu > Network Adapter. NAT should be selected.
77 | - **VirtualBox**:
78 | 1. In the VirtualBox Manager, go to Global Tools (top-right corner) > Host Network Manager.
79 | 2. Create a new Host-only Network. You can leave the default settings, e.g. Name = "vboxnet0", Ipv4 Address/Mask = "192.168.56.1/24", and DHCP Server enabled.
80 | 3. Switch back to Machine Tools, and with your VM selected, open its Settings.
81 | 4. Go to the Network tab, change "Attached to" (network type) to "Host-only Adapter", and pick "vboxnet0" from the "Name" dropdown.
82 | 5. Hit Ok to save, and (re)start the VM.
83 |
84 | #### Obtain IP Addresses for Host and VM
85 |
86 | In a terminal on your host computer, run `ifconfig`. It will list all the network interfaces available, both physical and virtual. There should be one named something like `vmnet` or `vboxnet`. Note the IP address (`inet` or `inet addr`) mentioned for that interface, e.g. `192.168.56.1`. This is your **Host IP address**.
87 |
88 | Do the same inside the VM. Here the interface may have a different name, but the IP address should have a common prefix. Note down the complete IP address, e.g. `192.168.56.101` - this is your **VM IP address**.
89 |
90 | #### Edit Simulator Settings
91 |
92 | Inside the simulator's `_Data` or `/Contents` folder (on Mac, right-click the app > Show Package Contents), edit `ros_settings.txt`:
93 |
94 | - Set `vm-ip` to the **VM IP address** and set `vm-override` to `true`.
95 | - Set `host-ip` to the **Host IP address** and set `host-override` to `true`.
96 |
97 | The host and/or VM's IP address can change when it is restarted. If you are experiencing connectivity problems, be sure to check that the actual IP addresses match what you have in `ros_settings.txt`.
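98 |
99 | For example, with the sample addresses above, `ros_settings.txt` would contain entries like the following (the key names come from this README; the exact syntax may differ by simulator version):
100 |
101 | ```
102 | vm-ip: 192.168.56.101
103 | vm-override: true
104 | host-ip: 192.168.56.1
105 | host-override: true
106 | ```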
98 |
99 |
100 | # Develop
101 |
102 | Starter code is provided in `quad_controller_rl/` with all the Python modules (`.py` files) under the `src/quad_controller_rl/` package, and the main project notebook under `notebooks/`. Take a look at the files there, but you do not have to make any changes to the code at this point. Complete the following two steps first (**Build** and **Run**), to ensure your ROS installation is working correctly.
103 |
104 | ## Build
105 |
106 | To prepare your code to run with ROS, you will first need to build it. This compiles and links different modules ("ROS nodes") needed for the project. Fortunately, you should only need to do this once, since changes to Python scripts don't need recompilation.
107 |
108 | - Go to your catkin workspace (`catkin_ws/`):
109 |
110 | ```bash
111 | $ cd ~/catkin_ws/
112 | ```
113 |
114 | - Build ROS nodes:
115 |
116 | ```bash
117 | $ catkin_make
118 | ```
119 |
120 | - Enable command-line tab-completion and some other useful ROS utilities:
121 |
122 | ```bash
123 | $ source devel/setup.bash
124 | ```
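125 |
126 | - Optionally, append the `source` line to your `~/.bashrc` so every new terminal is set up automatically:
127 |
128 | ```bash
129 | $ echo "source ~/catkin_ws/devel/setup.bash" >> ~/.bashrc
130 | ```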
125 |
126 | ## Run
127 |
128 | To run your project, start ROS with the `rl_controller.launch` file:
129 |
130 | ```bash
131 | $ roslaunch quad_controller_rl rl_controller.launch
132 | ```
133 |
134 | You should see a few messages on the terminal as different nodes get spun up. Now you can run the simulator, which is a separate Unity application (note that you must start ROS first, and then run the simulator). Once the simulator initializes itself, you should start seeing additional messages in your ROS terminal, indicating a new episode starting every few seconds. The quadcopter in the simulation should show its blades running as it gets control inputs from the agent, and it should reset at the beginning of each episode.
135 |
136 | Tip: If you get tired of this two-step startup process, edit the `quad_controller_rl/scripts/drone_sim` script and enter a command that runs the simulator application. It will then be launched automatically with ROS!
137 |
138 | ## Implement
139 |
140 | Once you have made sure ROS and the simulator are running without any errors, and that they can communicate with each other, try modifying the code in `agents/policy_search.py` - this is a sample agent that runs by default (e.g. add a `print` statement). Every time you make a change, you will need to stop the simulator (press `Esc` with the simulator window active), and shutdown ROS (press `Ctrl+C` in the terminal). Save your change, and `roslaunch` again.
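141 |
142 | For example, a quick sanity check is to print the reward inside the agent's `step()` method (a sketch; `step(state, reward, done)` is the interface defined in `agents/base_agent.py`):
143 |
144 | ```python
145 | def step(self, state, reward, done):
146 |     print("reward = {:.3f}, done = {}".format(reward, done))  # confirm your edit is picked up
147 |     # ... existing policy-search logic ...
148 | ```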
141 |
142 | Now you should be ready to start coding! Open the project notebook for further instructions (assuming you are in your catkin workspace):
143 |
144 | ```bash
145 | $ jupyter notebook src/RL-Quadcopter/quad_controller_rl/notebooks/RL-Quadcopter.ipynb
146 | ```
147 |
148 | ## Credits
149 |
150 | Credits for this code go to [sksq96](https://github.com/sksq96/deep-rl-quadcopter). I've merely created a wrapper to get people started.
151 |
--------------------------------------------------------------------------------
/launch/rl_controller.launch:
--------------------------------------------------------------------------------
1 | <launch>
2 |   <!-- NOTE: the original contents of this file were lost in extraction. The sketch
3 |        below is reconstructed from scripts/rl_controller_node, scripts/drone_sim and
4 |        sim/README; argument names and defaults are inferred, not verified. -->
5 |   <arg name="task" default="Takeoff"/>
6 |   <arg name="agent" default="RandomPolicySearch"/>
7 |   <arg name="sim" default="$(find quad_controller_rl)/sim/DroneSim"/>
8 |
9 |   <param name="task" value="$(arg task)"/>
10 |   <param name="agent" value="$(arg agent)"/>
11 |
12 |   <node name="rl_controller" pkg="quad_controller_rl" type="rl_controller_node" output="screen"/>
13 |   <node name="drone_sim" pkg="quad_controller_rl" type="drone_sim" args="$(arg sim)" output="screen"/>
14 | </launch>
15 |
--------------------------------------------------------------------------------
/notebooks/Experimentation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import numpy as np\n",
12 | "\n",
13 | "import tensorflow as tf \n",
14 | "import numpy as np \n",
15 | "import tensorflow.contrib.slim as slim\n",
16 | "import gym\n",
17 | "import random\n",
18 | "from collections import deque\n",
19 | "from numpy.random import normal"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 8,
25 | "metadata": {
26 | "collapsed": true
27 | },
28 | "outputs": [],
29 | "source": [
30 | "discount_factor = 0.9\n",
31 | "\n",
32 | "ob_len = 3\n",
33 | "action_len = 1\n",
34 | "hidden_size = 16\n",
35 | "\n",
36 | "tau = 0.05\n",
37 | "learn_rate = 1e-3\n",
38 | "\n",
39 | "replay_memory = deque(maxlen=1000000)\n",
40 | "\n",
41 | "noise_std = 4\n",
42 | "noise_rate = 0.995"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 75,
48 | "metadata": {},
49 | "outputs": [
50 | {
51 | "data": {
52 | "text/plain": [
53 | "1.071858529854959"
54 | ]
55 | },
56 | "execution_count": 75,
57 | "metadata": {},
58 | "output_type": "execute_result"
59 | }
60 | ],
61 | "source": [
62 | "normal(0, noise_std)"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 94,
68 | "metadata": {
69 | "collapsed": true
70 | },
71 | "outputs": [],
72 | "source": [
73 | "import matplotlib.pyplot as plt\n",
74 | "import numpy as np\n",
75 | "\n",
76 | "def plot_rewards(x):\n",
77 | "# with open(output_txt) as f:\n",
78 | "# lines = f.readlines()\n",
79 | "# x = [float(line.split()[0]) for line in lines]\n",
80 | "\n",
81 | " fig = plt.figure()\n",
82 | " ax1 = fig.add_subplot(111)\n",
83 | " ax1.set_title(\"Total reward per episode\")\n",
84 | " ax1.set_xlabel('Episode ->')\n",
85 | " ax1.set_ylabel('Sum Reward ->')\n",
86 | "\n",
87 | " ax1.plot(range(len(x)), x)\n",
88 | " plt.show()\n",
89 | " print('Mean reward over the last 5 episodes:', np.mean(x[-5:]))\n"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 97,
95 | "metadata": {},
96 | "outputs": [
97 | {
98 | "data": {
99 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAEWCAYAAACqitpwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeYZGWZ9/+5q6qrqnOaDjPTE5lAGPIQRRQBFxOYUFwF\nVBATq6vu6xo2yL7Lqusa1jX85CcKGMGA4IqBoAjozDDkNKEnpw7Tuaq68vP+cc6pPt1d4VTu7nk+\n11XXVJ2qc85T3T3nPnf63qKUQqPRaDSaUuKq9gI0Go1Gs/DQxkWj0Wg0JUcbF41Go9GUHG1cNBqN\nRlNytHHRaDQaTcnRxkWj0Wg0JUcbF82CQ0T8IqJEpKfaa8kXEdkkIu+q9jryRURuE5FPlviYHxCR\nB0p5TE3l0MZFUxFEJGB7JEVk0vb6nTn2vUxEeiu1Vk3+KKXerZT6z2qvQzN38FR7AZpjA6VUg/Vc\nRPYC1yulqn5XKiIuAKVUssLn9Sil4pU851w6v2bhoz0XzZxARGpF5JsickREDorIl0SkRkTagbuB\n1TZPp11EXiYim0VkVEQOi8hXRcTRzZIZevo3EdkMhIAlItImIneISJ+IHBCRf7UMj7ntJPP5dWbI\n7Tjz9YdF5Kfm84xrsoXqPigiu4Dnze2vE5Gd5j5fybHuL4jIT0TkFyIyISKPW+sy318mIveIyFER\n2S0iH5ix749F5E4RmQCuyvA7+Jr5/ftE5H9ExGe+d5mI9IrITSIyLCJ7RORK274/FZF/Mp93i8jv\nzO80JCIP2T53sog8Yr73rIi8xvZep4jcJyLjIvJXYMWM9W0QkYdEZEREXhKRN2b9RWuqijYumrnC\nTcApwMnAmcArgU8qpYaANwG7lVIN5mMIiAE3Au3Ay4E3ANfncb53AdcAjUAf8CNgDFgNnA28Ebja\n/OyfzfUAvALYDVxoe/2w+dzJml5vfr/TRWQxcBfwCaADGAQ25lj3W4DbgTbgHuCXIuIWETdwH/AX\nYAlwGfAZEXlFmn2bgV+kOfZXgB6M38F6YB3wKdv7KwEv0A28D7hdRFalOc4/AtuBRcBi4HNgGFjg\nN8CvzO/7f4Cf2Y5xCzAMdAEfBN5rHVBEmoD7gVvN414DfE9E1mT6QWmqjFJKP/Sjog9gL3DJjG2H\ngFfZXl8BbDOfXwb05jjmp4CfmM/9gAJ6Mnx2E/AZ2+sVQBCosW17D/Bb8/mHgbvM57sxLqy3ma/7\ngBPzWNP5tvdvAP5ke+0GBoB3ZTjeF2Z83gMMAWdhGLmdMz5/E/Bt275/yPLz8wBRYKlt20XAS7bf\nQRjw296/F/g/5vOfAv9kPv9P4GfA6hnnuBTYB4ht293mz8kPJIGVtve+AjxgPr8WuH/G8W4H/rHa\nf8/6kf6hcy6aqiMignE3vM+2eR+wNMs+JwJfBs4AajEujo/lcdoDtucrMC5ug8ZSAMOrt4oIHgb+\nRUSWYxihXwKfFpHjAZdS6sU81mQ/7xL7a6VUQkQOOV23UiouIofN4zQDK0Vk1PZZN/BAun3TsASo\nAV6w/QwEsOdlBpVSYdvrfeZ+M7kZ+DfgjyISA76llPqK+dn9yrQMtmMsxfj9y4w17sPwZsH4HV04\n4/t5gJEs30lTRXRYTFN1zItNH9Nj7MsxvBkw7vhn8v8DTwLHKaWaMC5mkuZzGU9re34ACACtSqkW\n89GklDrDfP8FjAv1B4CHlRGWC2CEzf6c55rs5z0CLLNemDmejAbVxP55N8YF+7D5HbbZ1t+ilGpU\nSr0pw7lncgTDkBxn279ZKdVu+8wiM7Rlsdw89/QvqNSYUuqjSqkVGKG4fxKRl5mfXT7j49bvuc9c\n37IZ71kcwPC87N+vQSn191m+k6aKaOOimSv8BPhXM1nfCXwW+KH5Xj/QKSINts83AmNKqYCZ1H5f\noSdWSu3BCJX9p4g0iohLRNaKyAXm+wrDiNzIVH7lYeDvbK8LWdO9wFki8noRqcHIQbTl2Od82+c/\niREWexJ4FEBE/t4sHvCIyCkicka2g9l+BjHge8B/i8giMVgmIpfaPlYD/LOIeEXkVRhhrlm5GxG5\nXERWmx7pGJDACHk9ArjMNXrMY78aI+QYBn4N3GQWFpwC2EvUf4WRp3q7GIUeXhE5V0TWOfl+msqj\njYtmrvAvwIsYXsLTGOEkq2/iGYwL8T6zyqgN+BhwvYgEgG8CdxZ5/ncALcA2jKTynRiJZYuHMYzH\nnzO8Jt81KaWOYFRtfQ0jmd8FbM2xzl9gJLpHMLyCtyilEqZxeC1wPkY4aRD4NtCQ6UBp+HsM72Ir\nhlH4HWBPmO/F8G76MAzRe5RSu9Mc5wTgj8AExs/nv5RSfzUNyOuBt2IYxa8Ab7cd4/0YP4N+4DvA\n960DKqVGgL/ByIUdMdf57xgGTzMHkenhT41GM1cRkS8Ai5RS+VTFlerclwHfUErp6iyNI7TnotFo\nNJqSo42LRqPRaEqODotpNBqNpuRoz0Wj0Wg0JeeYbaJctGiRWrlyZbWXodFoNPOKJ5544qhSqiPX\n545Z47Jy5Uq2bs1V9anRaDQaOyKyL/endFhMo9FoNGVAGxeNRqPRlBxtXDQajUZTcrRx0Wg0Gk3J\n0cZFo9FoNCVnwRgXcwzrdnMU66dy76HRaDSacrEgjIs51+KbwGuAE4F3mIObNBqNRlMFFoRxwZh5\n3quU2q2UimKMXL2iymvSaDSaOcV4OMYXf7eNA8Ohsp9roTRRLmX6eNSDwDkzPyQiN2DMLWf58pkD\n8TQajWZhEk8k+cmW/Xz1gZ0MB6Msaanl6nNX5N6xCBaKcXGEUuoW4BaAjRs3asVOjUazoFFK8cft\nA/zHfdvoHQhw7uo2/ul1J7JhaXPZz71QjMshps/e7mFq/rpGo9Ecc7x4eJyb73uRx3qHWL2onluu\nPpNLT+zCmD5dfhaKcXkcWCsiqzCMylXA31Z3SRqNRlN5hoNRvvT77fz08f201NbwuTecyDvPXUGN\nu7Ip9gVhXJRScRG5Efg94Aa+p5R6ocrL0mg0mooRTyT58Zb9fPkPOwhE4rzn/FV89OK1NNfVVGU9\nC8K4ACil7gPuq/Y6NBqNptJs2TPMv977Ai8dGedla9r53BtOYm1XY1XXtGCMi0aj0cwlHtrWz6+f\nOcLHLlnH8va6kh8/EInz0pFxfrhpH/c8fZilLbV8+51ncNmG7orlVbKhjYtGo9GUkLFQjJv+9wV+\n+aRRU/TAS/189W2nccmJXQUfMxxLsHXvCC8cHuP5w+O8cGiMPUNBlAKvx8VHXrWGD75yDbVed6m+\nRtFo46LRaDQl4v4X+/ns3c8xFIzykVet4YrTl/LRnz7F9Xds5UOvPI6PX7oOT56J9XgiyVW3bOLp\nA6MALG2p5aQlTbzx9KVsWNrEqT0ttDf4yvF1ikIbF41GoymSkWCUm379Ar96+jDHdzfyvXefleol\n+fkHzuemX7/At/60i6f2j/L1d5
xOR6NzY/C9x/bw9IFRbrr8JC4/dQmt9d5yfY2Soo2LRqPRFMGT\n+0e44Y4nGA1F+ejFa/nwRWvweqa8E3+Nm8+/+RTOWN7KP/3qeV7/P4/wzb89g40r23Iee99QkK/c\nv4NLT+zimvNWzIlcilMWiraYRqPRVIVvPtSLCNxz48v42KXrphkWO1duXMYvP3Q+/ho3V92yiXue\nzt7nrZTi0798jhqXi/97xYZ5ZVhAGxeNRqMpmFgiyabdQ7z6xC5OWpJbUuWkJc3ce+MFnLmilY/f\n9Qy/e/5Ixs/+bOtB/rJriE+99ni6m/2lXHZF0MZFo8nBkbHJai9BM0d5+sAowWiCl69d5Hif5toa\nbn33WZza08zf/eQp/rhtYNZnBsbD/PtvXuTsVW2846z5KbKrjYtGk4WnD4xy3ucf4sn9I9Veypwg\nmVT0DgQIRePVXsqc4NGdR3EJnLfauXEBaPB5+P57zmZ9dyPv/+ETPNZ7dNr7n/v1C4TjSb7w5pNx\nueZXOMxCGxeNJgt/3jEIwK6BQJVXUj0OjU5y1+MH+MhPnuKsmx/gkq88zH/9fke1lzUneLT3KCf3\ntBQksdJcW8MP3nsOqxfVc/3tW9myZxiA37/Qx33P9fHRi9eyuqOh1EuuGLpaTKPJwuY9QwAMTESq\nvJLKkkwqvvC7bdz/Yj97jgYB6Gj0ceG6DjbtHmLP0WPX2FpMhGM8fWCUD7xidcHHaK338oPrzuGq\nW/7Ke297nG+/6wz++VfPc8LiJm64sPDjzgW0cdFoMhCNJ3linxEO6x8PV3k1lWXPUJBb/rybM1e0\ncvW5K7hg7SLWdjYgIlx32+McHju2fh7p2LR7mERSccGajqKO09Ho40fXn8vbvvNXrr51Cy6B7167\nseIqxqVmfq9eoykjzx0aJRxLAtBXhYtpOJbg0798riqGbWDc8NQ+fuk63nvBKtZ1NaZKYbua/QxU\nydjetfUAX3tgBzv7J6pyfjuP9R6ltsbNGStaij5Wd7OfH7/vHNZ2NvCRi9dySk/xx6w22nM5Rukb\nCyMCXU3zr8SxUmzabcTANyxtor8KYbGte0f4yZb9nLmilbee2VPRcw9MGMajM00neXeTn6FglEg8\ngc9TWS2rL/x2G8PBKF97YCfruxp5/SmLef2pS1i1qL6i6wB4ZOcgZ69qK9nPoKe1jj987MJ518+S\nCe25HKP8w8+e4R9/8Wy1lzGn2bxnmHVdDRzf3VSVO/Ud5t35UKDyhm3QNKadjbNvPrrNGxLLu6kU\nQ4EIw8EoH3zlcXzuDSfS6Pfw5ft3cNF//YnXff0R7tp6IK/jxRPJgtdyZGySXYPBvEqQnbBQDAto\nz+WY5WggQrSI/1yVZEf/BO+4ZRP33PgyelpLL12ejngiyRN7h3nzGT001XoYmIiQTKqKloXuHDCM\ny3AwWrFzWgxMRPB6XDTVzr5EdDYZ3kz/eJhlbZX5fQD0mhV756xq45XrO3n3y1ZxZGyS3zx7hLuf\nOsQnf/4sSine7qAv5N5nDvPJnz9De72PM1a0snFFK2euaOX47kZHwpKP7jRKh1+2prTGZSGhjcsx\nymQsUZWLViG8eHicoWCU5w6OVcy4PH94nGA0wTmr2xgORkkkFUPBaF6Cg8Wyvc/0XKphXMbDdDb6\n0t5JW93i/RX2XHoHDeOypnOqPHdxcy3Xv3w1156/kutu38pn7n6eziY/F63vzHicB17s5+N3Ps1J\nS5vpaally54hfv3MYQDqvW5OX97Kp15zfEp4Mh2P9R5lUYOX47urO5BrLqONyzFKKJpgIhwnHEvg\nr5k7MyDSYV1cD4yEKnbOzbuNEuSzV7Xx1H5D6rx/PFwx46KUYme/cTGtlueSLt8CU2GxvgqHCnsH\nAtR53Sxprp31Xo3bxbfeeQZv/85f+fCPnuTOG87j5J7ZxuEvvUf50I+f5MQlTfzwurNp9NeglOLQ\n6CRP7BvhiX0j/ObZI/zDz57hNx95Oe40nqpSikd7h3jZmkULKoxVanTO5RhlMpoAKh83LwQr53Bg\nuHIyLJv3DLO6o57ORn+q6KGSVVt942EmIkYXfFU8l4lI2nwLGM1/Po+r4lVsvQMBjutoyBiabPB5\n+P67z6K1zst7bnucA8PTb0ae3D/C9XdsZWV7Hbe/xzAsYOQ5elrruOK0pfzbFRv4tys2sK1vgl88\ncTDtebb3T3A0EOECHRLLijYuxyBKqZR8h1UVNJcZrrDnkkgqHt8zzDmr2gHoSuUYKmeIrZDYkmZ/\nVRL6A+PhVG5lJiJCV5O/4uXZvQOBaSGxdHQ2+bn9vWcRSyS59vtbGDH/dl46Ms67v7eFjkYfP7zu\nnKwzUV57cjenL2/hy/dvTytzY+VbLihxMn+hoY3LMUgkniSpjOfzofP8aMA0LsOVMS4vHRlnIhLn\n3NXGvI1FDT5EKuu5WCGxc1e3VzwsFo4lGA/H6cgy3bC7yV/RsFggEufIWDincQFY09nId6/dyMGR\nSd53x1ZeOjLO1bdupt7n4UfXn0NnjvJ7EeGzrz2B/vEI331kz6z3H+09ynEd9SxOE57TTKGNyzFI\nOJZIPa9WM1w+DAcNA3hwZBKlVNnPt8nMt1ieS43bRXu9r6Je3o7+CRY1+Dius4FQNDHtd1ZuUmXI\nGTwXqHwjpaXtdpxDra2zVrbxtbefxhP7R3jd1x8B4EfXn+O4IGTjyjYuO6mb7zy8K/XzAIjEE2ze\nPaxDYg7QxuUYJBS1GZd54LlYd+6ReHLaf/RysXnPMCva66bN0Ohu9lU0LLZjIMC6rgbazfBNJfMu\nUw2Ume/wu5t89I2HK2LsYaoM2YnnYvHakxfzuTecRE9rHXe895y8RSD/8TXHE4kn+doDUyKdT+0f\nZTKW4IK1xUm+HAto43IMMt+My1Agysp2446z3HmXZFLx+N5hzlk1fQRtV2PlcgzJpGJn/wTruhpp\nM43LcKCCxsU0otkq47qa/IRjScYnKyO9v3MgQI1bWNGeXyn6teev5M+fvIgTlzTlfc5Vi+p55znL\n+enjB+g1e44e3XkUt0s4Z3XuEcXHOtq4HINM2oxLJTyBYojEE0xE4py2zNBaKnfF2Pb+CUZDsVRI\nzKKzyV+xsNih0UlC0QTruhppb7A8l8r9ngYDDsJiFS5H7h0IsLK9vuJijh+5eC11NW6+8NttgJFv\nOW1ZC03+/CX2jzW0cTkGsSpgamvcc95zsUJilpBfuZP6Vn/LzDvTriYfRwNRYhVQNbA689d1NdBW\nb1zghyrsubgE2uuzJPSbK2tcdg3mrhQrB+0NPj540XE88NIAv3+hj2cPjuqufIdo43IMEjKTwyva\n6xic46XI1kV1SUstHY2+sofFNu8ZZmlL7azEr3WnXglPb4dZKbbWHharcM5lUYMvbQOhRXcFe38i\n8QT7hoJVMS4A733ZKpY0+/n4nU+TVJRcT2yhoo3LMYgVFlvRXsdQMFqUgF+5sS6q7Q1elrX
WljUs\nppRiy57htPH0Sl5Md/RN0NXko7m2hia/hxq3VDihH8kaEgObvlgF8lB7j4ZIqvyS+aXEX+PmE69e\nTzCaoN7rToVoNdnRxuUYxEror2yvR6mpPpK5iJVraK/3sqytrqyeS+9AgKFglHNn5Ftgulhjudkx\nYCTzwei5aKv3psqxK8HAeObufAufx01bvddRWOzXzxzmvM8/mGo+zBcrTFgt4wLwptOXcvryFi4+\noWveD/GqFPqndAwyaeZcVrQbMzDmcpe+FRZrr/exrLWOI2Phsnlam8wZ5uk8lykJmPJe5JNJRe9A\nIGVcANrqfRUOi2XWFbPT2ehzZGwf3XmUI2Nhrv3+Fm7/y968y5d7BwKIOO9xKQcul3DX+8/jq28/\nrWprmG9o43IMMhmzPBcjrzCX9cWGglE8LqGp1sOytloSScWRMoViNu8eorvJz/I0MvJtdV48Lim7\n53JgJEQ4lmRd19SFtL3eWzHvMp5IMhR0Zly6m/2OjO2uwQCn9DRz0fpO/vXeF/jM3c8TjTu/Qegd\nCNDTWlt1gdUatytrHkozHW1cjkGssNhyy7jM4Yqx4UCUtnovIsIyM8lejooxpRSbzXxLOqVbl0vM\nO/Xy/qwsTbG10zwXb8U8l6FgFKWgw8GEUqcSMLsGA5y0pJlbrj6TD73yOH6yZT9X37rZ8XfqHQiw\npopei6YwtHFZAIyHY7zru5sdX3Qnowl8Hlcqrj6nw2LBKO2mxpU1mKoceZc9R4MMTkRm9bfY6Wou\nf6/LTrMTfa0tv9DeUDnjYnmxTjyXriY/RwORrOXZw8EoI6EYx3XU43IJn7zseP77qtN46sAol3/j\nUbb1jWc9RyKp2H20epVimsLRxmUBsL1vgkd7j/LUgVFHnw9FE9R53Xg9LtrqvXO6kXIoGElJoCxu\n9uN2SVkqxh7tNZLN2TqvK9Glv6N/gqUttSk5eDDCYoFInEi8/PpiU9IvzoyLUtnLs3cPztYEu+K0\npdz1/vOIxpO85Vt/Yd9QMOP+B4ZDRONJbVzmIdq4LADGQjEAQhFnUhyGcTHmxHU2+uZ2WCwYTfV6\neNwuFjf7S+65JJOK2/6yl5OWNLF6UX3Gz3U1OUtgF8P2vgnWdk2/kFqNlJXwXgZSopUOwmLNxrqy\nhcZ2DxqGY3XH9J/racta+MUHz2cylsg4NwXsmmJ64uN8QxuXBcDYpGFcAg6Ny2Qsjr/G+NV3zHHj\nMhSIpiRQAJa11pU85/LgtgF2DwZ5/yuOyzpZsLPJz3g4Pk0+p5TEE0l2DwanVYoBKeNaiS79lK5Y\nFrl9C6uCLps68q7BAF63K60a8bK2Os47rp17nzmcsYIs3WhjzfxAG5cFgGVcQg4venbPpaPRx+Ac\nld2PxBMEIvFUWAxgWVstB0ZKGxb7zsO76Gmt5bUburN+LnUxLVPeZd9wiGgiOcu4TOmLVcJzCdNa\nV4PXk/vSkBp3nCVUuGswwKpF9RmrrC4/dQl7h0I8d2gs7fu9AwE6Go2GUs38QhuXBYBlXIJ5hMVq\nvUZZZ2ejn8FApCjp9EAkztFAhLFQjGAkTjSeLIkU+1R3/tRd9LLWOgYnIiWbb7J17zBb943wvpev\nxpOjOa67zL0uO/unNMXstKckYMrvYWYbbzyTtnovNW6hL8vPY/dgcFZIzM5lJy2mxi3c+/ThtO/r\nSrH5i6caJxWRK4HPAScAZyulttre+zRwHZAAPqKU+r25/UzgNqAWuA/4qFJKiYgPuAM4ExgC3q6U\n2luxLzMHSBmXNCNZ0zEZTaTuhjsbfcQSipFQLBV+yYeRYJRzP/8gkTR9CzVu4Z9ffyLXnLcy7+PC\nVBiobZrnYoRXDo6EShKH/86fd9NSV8OVG3tyftYad1wuscbtfelDQO0VFK90Iv1iISJ0Nvoz5qGi\n8ST7hkO89uTFGY/RXFfDK9Z18r/PHuEzrz0Bl83DUcpoKH3T6Uvz+xKaOUFVjAvwPPBm4Dv2jSJy\nInAVcBKwBHhARNYppRLAt4H3AZsxjMtlwG8xDNGIUmqNiFwFfBF4e6W+yFxgPOW5OA2LxVnmNUa0\nWheSgYlwQcalfyJMJJ7kHWcvY21nI7FEknhSEUskufXRPTx7MH24wwlWGGhmWAwM6f1ijUvvQID7\nX+w3ZNW9uf8rdDrIMRTDjoEJlrXVzlpLU60Hj0sqktA/OhHhuCyexkyMRsr0P4/9wyESSZXVcwG4\n/LQlPPBSP1v2DnPu6qlS8P7xCIFIfFaBg2Z+UBXjopR6CUiXPL0C+KlSKgLsEZFe4GwR2Qs0KaU2\nmfvdAbwRw7hcgeEFAfwc+IaIiKrUiLw5QL5hsXAsSW2NVS1mXTAjHJ895ZAW65yXbVjMK9ZNn853\n33NHCIQLHyY1ZM4VmRkWg9L0unz3kd34PC6uPW+Fo883+T34a1xlqxjb2T/B+q7ZBlNEaK33lt1z\nUUoxmEdYDIxQ4UsZelV2pSlDTsclJ3RSW+Pm3mcOTzMuqUoxHRabl8y1nMtS4IDt9UFz21Lz+czt\n0/ZRSsWBMSBtJ5yI3CAiW0Vk6+DgYImXXj3yDYuFonHqUjkXy3MpLJ4fML2lBt9saY4Gn8dxBVs6\nrDt1u0fV0ejD53EVXTE2MB7ml08e4m0bl00zXtkQEbqanEme5Es0blSKrU1jXMDw3sqd0B8NxYgm\nko56XCy6mvwZlZEt45LLc6nzerj0xC5++9yRaQ2ZvXNAsFJTOGUzLiLygIg8n+ZxRbnOmQul1C1K\nqY1KqY0dHQtnBvZY3mGxxJRxMcNihTZSWr016cJKDf4aJoowLkPBKDVuock/dWwRoacE0vvf/8te\n4skk1798VV77Gcal9J7L3qEg8aSalcy3MLr0y5vQn+pxcW5cupt9BKMJJsKxWe/tHgzS2eib1hCa\nictPXcJIKDZNOXnnQIBGvyfruGXN3KVsYTGl1CUF7HYIWGZ73WNuO2Q+n7ndvs9BEfEAzRiJ/WOG\nfMJiiaQiEk+mqsXqvB4afJ6Cy2stz6TBN/tPqdHn4fBo4UZgKBBJ6YrZKVZ6fyIc44eb9vGaDYtT\nytBO6Wry89xBZ0oI+bDDrBRbmyGP1Fbv47mR0p/XzlR3vvOwWJdtzs1MI7JrMOBYyfjCdR0019Zw\n7zOHuej4TsCsFOtsyNp7pJm7zLWw2L3AVSLiE5FVwFpgi1LqCDAuIueK8Zd2DXCPbZ9rzedvBR46\nlvItAKN59LlYisi1NoXZYhopLYNWn8a41PvcReVcjO782XetxTZS/nTLASbCcW64cHXe+3aZ4pWl\n/hPb0R/AJZlDQJUIi6UaKPMMi8Hs8mylVM4yZDtej4vXbOjmDy/0pZpUdw3qMuT5TFWMi4i8SUQO\nAucBvxGR3wMopV4A7gJeBH4HfNisFAP4EPBdoBfYhZHMB7gVaDeT/x8HPlWxLzIHCMcSKflyJzmX\nUNQKY003LoMF5hGC5oXAfjyLBl9NUTmXo4Eoix
pmV7Ata6tlPBxPeWz5EI0n+d5jezhvdTunFjBR\nsKvJz2QsUVS4Lx07+iZY0V6fUVa+rd7LRLi8+mKpsFgexiVTI+VQMMrYZCyvGSyXn7qEYDTBQ9sG\nGA1FORqI6kqxeUy1qsXuBu7O8N7NwM1ptm8FNqTZHgauLPUa5wvWBbbJ73EUFrPuCmttOZLORh/P\nZ+iQzkUwEsfjEnxpOrob/EZCP5lU0/oXnDIcjLKiPY1siE16v3lpc17HvPupgxwZC/P5N5+c93rA\nUEYGoyCgyUEuwSk7BiamKSHPxCpqGAnG6G4uz1yTgYkw9V53Wi80E93mz2Nm788us9LruDyS8ees\nbqez0ce9zxxK9RTpZP78Za6FxTR5YhmXJS21xBIq5xCmUBpPo7PRX1RYrN7nSRsXbzQvUk6r2GYy\nHIymGgjt2BspnZJMKr75x14+c/fznNrTPKts2ildjda449Il1yPxBPuGQrNkX+wsSknAlC+pbzRQ\nOs+3gDFfvrm2ZlaRw+6jpmBlFiHQmbhdwutOWcwftw/y5P4RANZ0aMHK+Yo2LvMcu3GB3En9UMpz\nsRmXJh+haKKgEFYwmkibzAfDcwHngpp2wjFTVyxdWCzluTgrFhiYCHPN97bwpd9v5zUbuvnB9ecU\nnCTucqDcX5QzAAAgAElEQVSnlS+7B4MkkiprCKgSysiD45GCKrPSqUXvGgjg87hYav5dOuXyU5cQ\njRsNuD6Pi6Wt+e2vmTtUq0NfUyIsuf3FZngiGI3TmqXT3tLkqquxey5mr8t4mIY8E6jBSDxtvgWm\nkvxOmzvtpOtxsWiuq6HR73FUMfbIzkE+dufTTITjfP7NJ3PVWcuKqj6yynT7SyheaU2fzOa5tKX0\nxcpnXAYmwmzIM8wIhsGdqS9mCVbmGw49bVkLy9vq2D8c4oTFTXqs8DxGey7znNmeS/aE71RYzJ5z\nsdR+8w+5BMywWDqssNhEARVjVjd6ewZDmatiLJZI8p+/28Y139tCa52Xe2+8gHecvbzostY6r4dG\nvydVWVUKHt87TL3XnTXnYv0cjpaxSz8f0Uo73WkaKXcfDeaVb7EQEd5wqqFFlu3noZn7aOMyz5ky\nLlOeSzasarGZYTEorJEyGImXJSxm5RbShcUgu/S+Uoob7tjKt/60i7eduYx7b7yA9d2li913l7iR\ncvOeYTaubMuqytxcW4PbJWVrpAxE4oSiibwaKC26mw1l7UTSKM+OxBMcGA7lVSlm5/JTDfGNTA2l\nmvmBNi7zHMu4dDc5y7lMpknoW4OhCvFc7N3+M7GMTiG9Lim5/TQJfTA8l4MjobT9Jn/aMcgftw/y\nqdcczxffeso0Q1oKStmlPzgRoXcgME1TKx0ul9Ba5y1bWMwS48ynDNmis8lPIqk4amrB7RsKkVTk\nJYBpZ313I7deu5F3netM800zN9HGZZ4zNhmj0eehqdbKbzgNi01dcFvqavC6XQV16QeyeS5WWKwQ\nz8WS28/oudQRjiUZDMxu3vvyH7azrK2W974sP2kXp3Q2+UpWLbZlzzAA56xuy/nZ9jKKV071uBQW\nFgNSBjdVhlxEA+TFJ3TRUpe/Srdm7qCNyzxnfDJGU20N9V5nyXOrQ9/erCciBTdSBrPlXPyFey5D\nwShetyuVt5mJXXrfzu9f6Of5Q+N89OJ1jqYpFkJXk5+BiTDJZPFd+pt2D1HndXOyg0R6W30ZPZcC\ndMUsZjZSWoKVq/IoQ9YsPLRxmeeMTcZorq1JXeBDDnIuLmFW02OhEjDBaIK6NIrIUFy1WCZdMQur\nHNne65JIKr5y/3ZWd9TzxtOW5H1Op3SlBqwVf6HfvGeIM1e0UpNjCiYYXly5JGAGC+jOt+hqtnp/\nDOOyezDI4mZ/Xs2YmoWHY+MiIt2iFeTmHFPGxbjABxyExeq8s5seOxt9eYfFYokk0XiShgyDtmrc\nLnweV0EJfUNXLHNYpMfWpW/xv88eZkd/gI9dsi7nyOJisLrSiw2NDQUi7OjPnW+xMMJi5UnoD0yE\n8XpcBc2qX1Tvw+2SVJd+PoKVmoWLo/+BItIK7AYuL+9yNPliGZfaGjciuT2XyWgibYK7syl/zyWb\naKVFo99TWM4lGM1YKQZGtduiBl8qLBZPJPnaAzs5vruR12UZq1sKrC72YntdrHzLuQ7yLWAUN4yH\n49NmnpSKwfEIHQ2+gkq1XS6hs9FH31gkb8FKzcLF6e3dO4H7gevLuBZNAVjGRUSo9+YezpWpuquz\n0c9oKJaXMGI2uX2LBp+nwJxLJGOPi4VRjmx4Lr988hB7jgb5+KXrCtIxy4eUEnCRXfqbdg9RW+Pm\n5KXOBDSt4oaRMoTGDOmXwuemWHmowYkIE5G49lw0jo3Le4AbgWUiUt7bQk1ejE3GaK4zQhn1Pjeh\nHGGxyVhimty+hRVrz6dJL1V5liHnAlPilfkyHIjmnBC5rNWY6xKJJ/jvB3dySk8zl57Ylfe58sUq\n3S42LGb0t7Q6LjywjG058i4DE+GC8i0W3U1++sbC9DocbaxZ+OT8qxaRjcBRpdQB4A7g3eVelMYZ\n4ViCSDyZipPXez0EHITF0nkuHTYJGKcEHITFChl1HI4lCEYTWXMuYHguh0fD/Hjzfg6NTvKJV6+v\nyGApr8dFe723qLDYcDDKtr4JzlnlLCQGUxIw5ShHLrQ736K72U/feJjdg6ZgpQ6LHfM4uWW6DmNm\nCsAPgKvLtxxNPoxbcvuWcfF5UmOHMxGKxtOOJC5EAiZYprDYUDC79IvFstY6EknFl/+wg7NWtnLh\n2kV5nacYupr8eRnimUzlW5wl88HuuZQ2qR+JJxgNxYryXLqa/EyE4zx/aIw6rztVnqw5dslqXESk\nDrgMc/aKUmoQ2C4iryz/0jS5sLrzLc+lzutODe/KRChLQh/yNS6ZB4VZFOK5DFu6YrnCYqb0fiAS\nr5jXYtFVZCPlpt1D+GtcnNLjfGCZ9fModa/LYBE9LhbW/JW/7h4qSLBSs/DI5bnEgHOUUvaRf9cC\nT5ZvSRqnzDQuDb7cA8MmY+nDYu31XkRgMI+7cUeeSwE5l6PmnXnOsJhZjnzBmkV5eQClwFACLtxz\n2bxnmDNXOM+3ALTU1uCS0huXYrrzLSxPZd9Q4ZpimoVF1r9spVRMKTVgvRaR1yulxpVS4+VfmiYX\nszwXnyeVZM9EKJo+oe9xu2ivz68c2RLJzJ5zqck7LGZ5LulGHNtZ1lbLjRet4aYrTsrr+KWgs8nP\n0UCEeAFlwaOhKNv6xjlnVX4G0dIXK3VC31J4LmSWi4U1oRN0Ml9jkG+n2b+VZRWagrCMS0sqoe/O\n6SVk6nMBq5Ey/7BYfYYmSjD6XKKJZF4lzkMOPRcR4R/+Zn1VLmZdTT6UKkwCf8ueYZTKL99i0VaG\nRspBszCh2FJkC53M10D+xkUHUucQMz2XXAl9pZSZ0M9gXJry69IPRgwpGX9N5j+jevNcuQQ17Vi6\nYtnCb
dVmplhjPmzaPYzP4+LUZfkP5iqHvtjARASXZFagdkKDz5P6fWnPRQP5G5f3l2UVmoIYm1kt\nZib0MwkqRuJJkoq01WJgei55JKmtQWHZEukNfmNt+YTGjB6XzLpic4GuIozL5j1DnLG8FZ8n/1EA\nixp8ZQmLLWrwFT310Urqa8FKDeRpXJRSW8q1EE3+WHL71kXByn1YysczsUYcp8u5gBFzHwpGU0Of\ncpFtUJjFlOx+LOvn7Azl0BWbC6TGHedpXMZCMV48Mu5IYj8d5fFcwkXlWyy6m/0sbakt+fwczfxk\n7sYdNDkZM+X2LepsKsTpkuzpZrnY6Ww0hj4NB6OOLjbZBoVZFCK7b+iKFX+xKyft9T68Hhf7s4xa\nTsfjewvPt4BhXEZDMeKJZMnEOY0GyuJ/3h++aE2q90qj0ZL785hxU1fMosGUYcnU62IZl2wJfcBx\n3iXboLCpNeU/6ngokFtXrNq4XcLazgZ29Afy2m/T7iG8HhenLXPe32LHEvMcLoHcv8Vgkd35Fucf\nt4jLNmh1KI1BxiuDiJyRbUellO51qTJjM4xLXY6BYVMjjjPkXGyNlE6Ke7MNCrNo8OdvXHLJ7c8V\n1nc18pddQ3nts3nPMKcva5k2rC0frJ/LcDCal0E4MBzisd6jvGbD4pQWHZAaT1xMpZhGk45sV4Yv\nm//6gY3AMxjVYqcAW4Hzyrs0TS7GJmOsXjRVmdOQYziXJcefLSwGOJ5IGYwmco6izddzmYwmCEUT\nWeX25wrruxv55VOHGAvFpl2wMzE2GeOFw2Pc+Kq1BZ/TqugazrME+ku/3869zxzmpl+/yJvOWMq7\nz1/Juq5GhoIRkqqwIWEaTTYyGhel1EUAIvJL4Ayl1HPm6w3A5yqyOk1WZnsuVlgsg3FJM+LYTkee\nYTEjoZ/9DjxlXBzmXKwel7keFgNY190IwPb+Cc52IEC5de8wSeV8fks6LKObT8VYPJHkT9sHeMW6\nDrqb/PziiYP8ePN+zlvdzsvXGXpsHSUIi2k0dpzkXNZbhgVAKfU8cEL5lqRxil1uH+yeS/qcy2SO\nhL6/xk2T3+O4kdJJWKzOawwxc+q5DKdEK+f+nfTxNuPihC17hvG6XZyxvLXgc9rDYk7Zum+E8XCc\nd5y9jC++9RQ2ffpi/vGy49k/HOI/f7cdKK6BUqNJh5NqsedE5LvAD83X7wSeLd+SNE4IxxKEY8np\nnkvOsFhuocnOJr/jXpdgNLdxEREafB4mnHouZrinbR6Exbqb/DT6PWzvc6aG9NSBUU5c0lRwvgWg\ntc7QgMvHc3lo2wBet4sL1nYYx6j38sFXHsf7Xr6KB17q56kDo5y8NP+GTo0mG06My7uBDwIfNV//\nGfh2uRakccZMuX0gNcs+U7XYpBkuy9aH0NHgrEs/nkgSjiWzSr9YNOahjGxdNBfNA89FRDi+u5Ed\nfbkrxhJJxfOHxrjyzJ6izul2CS21NXlJwDzwUj/nrG6bVdnncbu4bMNiXeGlKQtZrwwi4gZuVUq9\nE/hqZZakccJM6ReYmgiZ23PJ/GvvbPLx5P6RnOe3DFh9jpyL8Zncas0Ww5au2DzwXADWdTXy62cO\no5TKqiiwazBAKJrIS2I/E+0NPsdhsT1Hg+weDHLNuSuKPq9Gkw+5VJETwAoRmR//048h0hmXGrcL\nr8eVMaE/maNDHwzPZdBBzsWJ3L5FPrL7Q4EoXo8rpUk211nf3ch4OJ5ztsuzB8cAOKWn+PBTW71z\nZeQHX+oH4OITyj/+WaOx4yQstht4TETuBYLWRqXUV8q2Kk1O0hkXMPXFsvS5+DyurBpSrfVewrEk\n4Vgia24gVdbsxLjkk3MJRllUP7d1xeys7zKS+tv6xuluzlxx9ezBUeq9blaXQNSxvd7LzgFnzZsP\nbRtgXVdDarCaRlMpnFSL7QL+1/xso+2hqSIZjYvPQyhDtZgTuZYWs/psNJRdxiNgniNXKTIYEjDO\nPZfIvAmJgeG5AOzIUTH2zMExNixtLlocEpzri42HY2zZM6y9Fk1VyHnbqZS6qRIL0eRHZs/Fk7nP\nJZrImm8BoxoJYCQUzXonbnlHThL6DT6P4z6X4WB0XpQhW7TUeelq8rGtL7NxicaTvHR4nGvPL03e\no73ey0jIEBjNZqz+vGOQeFJx8fGdJTmvRpMPOa8MItIBfBI4CaNbHwCl1KvKuC5NDlJy+/7pv8J6\nnztzn0ssnlOx1ho8lstzSRkXR2GxmryqxebbPJB1XY1ZPZcd/RNEE8mSJPPBSOgrZdwALMoi8Png\nSwO01tVwehF9NRpNoTgJi/0I2AasAm4C9gKPl3FNGgeMTcZo8HlmKePW+7J7LtmS+UBKzmU0hzCi\nkxHHFg0+N8FoPOOcGTtDgfmhK2bn+O5GdvYHMo4qeObgKACnlsi4OGmkTCQVf9w+wEXrO0sSitNo\n8sWJcWlXSt0KxJRSDyul3gsU5bWIyJdEZJuIPCsid4tIi+29T4tIr4hsF5G/sW0/U0SeM9/7upgZ\nXxHxicid5vbNIrKymLXNF2ZKv1jUezOX/YayjDi2SOVcckinWzkXJ6XIDX4PSk3Jz2QiFI0zGUvM\nebn9mazraiQST7JvKJj2/WcPjNFSV8OyttqSnM+SxhnKoi/25P4RRkMxnW/RVA0nxsW6yhwRkdeJ\nyOlA4eJIBvcDG5RSpwA7gE8DiMiJwFUYIbjLgG+ZvTZgNG6+D1hrPi4zt18HjCil1mD04nyxyLXN\nC8ZnzHKxqMsWFnOQ0LfnXLIRyivn4mwapXWxnA+6YnaO724CMif1nz00xslLm0tWAWcVPGTzXB58\naQCPS1LaYRpNpXFiXP5dRJqBTwD/AHwX+FgxJ1VK/UEpZV1pNgFW2/IVwE+VUhGl1B6gFzhbRBYD\nTUqpTUopBdwBvNG2z+3m858DF8t8qWMtAsNzmX1hb8gaFovnNC61Xjc+j4sxBzkXkexSMqk1pWT3\nsx8zpSs2j6rFANZ0NiBC2qT+ZDTBjv6JkoXEYCosZol8puNBsyu/yZ9brVmjKQdO+lweUEqFgTHg\nojKs4b3AnebzpRjGxuKguS1mPp+53drnAIBSKi4iY0A7cHTmiUTkBuAGgOXLl5fuG1SBsclY2lnl\ndd7MpciT0QS1Nbl/5S11NTk9l0AkQb3X4+huvNEadZzDc7GMy3zLudR63axsr0/rubx4ZIxEUpWk\nedKizfQud2Xoddk/FGLnQICrzp7ff+Oa+Y0T4/K8iPQDj5iPR5VSY7l2EpEHgO40b31WKXWP+ZnP\nAnGMooGyo5S6BbgFYOPGjc4Gxc9RMudc3EQTSaLxJF7PdMd0MpY7LAZGaGzEgefiJN8CzgeGHQ1Y\ncvvzK+cCsK6rIa3nMtWZXzrPxeN2cemJXdz+1324XMJnXnsCNbbCjge3GV35l5ygS5A11cNJn8sa\nEVkOvBx4HfBNERlVSp2WY79Lsr0vIu8GXg9cbIa6AA4By2wf6z
G3HWIqdGbfbt/noIh4gGYgv/GA\n85CMxsX0EkLROF7PdA/ASRMlGL0zOcNi0bijfAtM5WVy6YvN17AYGJ3697/YP0vZ4NmDY3Q2+rL2\nDBXCt955Bv9x30t8/7G9bDsywTf+9vRUIcRD2wY4rqOeFe2zPVuNplLkzLmISA/wMgzjcjrwAlNh\nrIIQkcswemcuV0qFbG/dC1xlVoCtwkjcb1FKHQHGReRcM59yDXCPbZ9rzedvBR6yGasFSSQ+W27f\nwvImZnoJiaQiEk/mrBYDy3PJUYrsYJaLRaPfWVhscCKCv8blyADONdZ3N5FU0DsjVPXMwdGSei0W\nNW4X//qGk/jylafyxP4RLv/GYzx/aIyJcIxNu4e4RFeJaaqMk6vDfoy+lv9QSn2gROf9BuAD7jdj\n9puUUh9QSr0gIncBL2KEyz5simcCfAi4DagFfms+AG4FfiAivcAwRrXZgibVnZ9mxPCU5zI972KJ\nVjoKi9XXMLo/V1gs4Tws5nDU8cBEhM5G/7zRFbOzvtto/NzRP8EGczbKRDjG7sEgbzptabZdi+It\nZ/awtquB9//gCd76//2F15+yhFhC8Srdla+pMk6My+nABcDfisingJ3Aw2bvS0GYZcOZ3rsZuDnN\n9q3AhjTbw8CVha5lPjKeQfoFpkJQMy/kodQsl9y/8uZaL6OhaFYZ+WA0TneTs1BPvcNRxwMT4Xk7\ny31lez1et4vttrzLc4eMfMvJJUzmp+OUnhbuvfECPvyjJ/n5Ewdprq3hzBW6K19TXZzkXJ4RkV0Y\nApYvB94FvALDY9BUgUy6YmDzXGZUjFkjjnN16AO01tUQSyiC0URGSf18wmJejwufx+XIc7FGB883\nPG4Xx3U2TBt5XI5kfiY6Gn386H3n8PUHd9LZ6Jul3KDRVBon2mJbMUJYf8GoFrtQKbWv3AvTZCab\ncbHCXrM9F+dhsSll5GhG4xKIJBwbFzDFK3MYl8HxCBeao3jnI8d3N7Jp91QtybMHR1nWVlux0uoa\nt4tPvHp9Rc6l0eTCydXhNUqpwbKvROOYbMalwVYtZscyLk4S+lP6YjF6MkRXgpF4XgO9cg0Mm4wm\nmIjE6ZinYTEwZGDufupQqpLv2YNjJW2e1GjmE058Z5eI3CoivwVDokVErivzujRZsMqE03ouGUYd\nW2GxOgdhsVzKyImkYjJWgOeSJediTb+crzkXIBXS29E/wVAgwsGRyZI2T2o08wknxuU24PfAEvP1\nDuDvy7UgTW7GJo2L9Ey5fZjyXIIZq8VyG4TW+uz6YpZX5GTEsX1dE1k8l4GJMACdDosE5iLrTOOy\nvW+CZw9VLt+i0cxFnBiXRUqpu4AkGBIrQHZ5W01ZySS3D0bCXmS25zJVLZZHziWDMnIwpYjs3Lg0\n+rN7LgMLwHNZ0uyn0edhR/8Ezx0cQwQ2LG2q9rI0mqrg5OoQFJF2QAGIyLkYOmOaKpGpOx9AREzZ\n/fTVYo4S+rVmziWD6u7ULJc8ci45EvoD46bnMo+Ni4iwrruRbX0TNPknWb2onkYtHKk5RnFiXD6O\n0QV/nIg8BnRwjPWVzDXGMsjtW9R53RkT+k6Mi9fjot7rzqgvls+IY4t6X+Y5M2B4Lh6XpCT/5yvr\nuhq577kjeD0uXr5Gy91rjl2c9Lk8KSKvANYDAmxXSmVv39aUlfEMcvsW6bwEK+fiJCwGRsXY6GR6\nzyWQx4jj1Jr8uXIuEToafbjm+dTE47sb+cmW/QA6ma85pnHUaaWUiiulXlBKPQ+8UkTuL/O6NFnI\nFhYDo2JspvxLKBrHJeB12FzXUleTsVoslMcUSotGn4doPEkknj5dZ0i/zN+QmMW6rqkm0JN1Ml9z\nDJPxSiMirxKRHSISEJEfisjJZkPlFzCmQmqqRC7jUu+d7bkYisjO5q+AIV45mqFabCrnkl+1GJBx\nSubAeJiOxvlbKWax3qwY87iEk5boZL7m2CXbbeyXMQZrtWNMePwrcJtS6kyl1C8rsThNenIaF59n\nVs5lMppwHBIDaM7iuViGK69SZH/2UceDExE6m+a/59JW76Wj0ce6rsZp0vsazbFGtquDUkr9yXz+\nKxE5pJT6RgXWpMlCJJ5gMpbIaVyCR2eGxZzNcrFozTKN0krM53M8yxBNpBl1HEskGQpGF0RYDOAj\nF6/N+vvRaI4FshmXFhF5s/2z9tfae6kO2aRfLOq97jR9LglHopUWLbVexiZjJJNqVpI91eeSR7VY\ntrCYNYFyPku/2Ln63BXVXoJGU3WyXR0eBt5ge/1n22sFaONSBSy5/WylyOnKfsMORxxbtNTVkFTG\ngK/muunnCkbi1HndeVV2TY06nu25DIxbDZTzP+ei0WgMMhoXpdR7KrkQjTOcei6hWGKa1xGKxh1J\nv1hY/Sajk9HZxiXqXG7fIhUWS5NzWQjd+RqNZjp66MM8w4lxqfN5UGqqtwXMsFienguQtpEyGEnk\npYgMU6OO03XpT+mKaeOi0SwUtHGZZzjyXFLilVMX8sm8w2KW7P7spH4+g8IsUqOO03ku4xFEYFGD\nNi4azUJBG5d5Rja5fQvLq7Anz/OtFpsaGDbbcwkUYFzqvOkFNQEGAxHa6rzU6OmJGs2CwckkSjfw\nOmCl/fNKqa+Ub1maTKTk9p14LrYL+WQ0kVffhZVzSVeOHIomWNSQnwaYiNDgTS8BMzAeWTCVYhqN\nxsDJ7eevgTDwHKbsvqZ6jE3GqPe6s97lWyXClnFRSpkJ/TyaKLMMDAtG4qxor8tn2YA5jTJNWGxw\nIjyv57hoNJrZODEuPUqpU8q+Eo0jcnXnw5Tml6UvFoknSSpng8Is3C6hye9Jm3MJROJ5dedbZJLd\nH5iIsNamyaXRaOY/ToLcvxWRV5d9JRpH5JLbh6mwmHUht2a55NNECcZEynQDw4w+lwKMi3+2cUkm\nlSH9osNiGs2CwskVYhNwt4i4gBiG7L5SSmlVviow7shzMX6tlr7Y1Ijj/IxLS23NrFLkZFIRiiVo\nyEMR2aLB55nV5zISihJPKm1cNJoFhhPP5SvAeUCdUqpJKdWoDUv1GJuMpSq5MmFViwXMajErPJZP\nnwuYM11mhMUmYwmUyk8R2aIhjXJAqoFS51w0mgWFE+NyAHheKaXKvRhNbpzkXKyQVWhGWCzfUFZr\nGmXklGhliXIuujtfo1mYOLlC7Ab+JCK/BSLWRl2KXB2cGBevx4XX7SIYtTyX/FWMwfBcZpYiW8cs\nKCyWplpsYNzszte6YhrNgsKJcdljPrzmQ1MlovFkTrl9i3rflDJyKM8RxxYtdTVMhOPEE0k8Zumz\ndcx8FJEtGn0eAtH4NM0zy3PRfS4azcIi5xVCKXVTJRaiyY0T6ReLOq8nJf8yFRbLP6FvnbfdlGYp\nZFCYRYPf0DwzCgKM/QcnIjT6PHkbPo1GM7dx0qH/RwyJ/WkopV5VlhVpMjLmQG7fwp48DxVRigww\najMuqRBbAcal3qYvZ
hmXgYkwHVqwUqNZcDi5QvyD7bkfeAuQflatpqzk5bn43CmjMmkahHy9g6ku\n/am8i1WBVmgpsnGMqT+fgXHd46LRLESchMWemLHpMRHZUqb1aLIwnodxsVdmhQquFjP1xYJTFWOp\nnEsBnks62f2BiQinLWvJ+1gajWZuk7MUWUTabI9FIvI3QHMF1javCETifO7eF9LKpZSK0Unj2M5y\nLm5CM/tc8g2L1U2FxSxSpciFdOj7jHVbFWNKKQYmwtpz0WgWIE6uEE9g5FwEIxy2B7iunIuaj/x1\n1xC3/WUvHY0+PnzRmpIfv388zNce2ElzbQ3dzbnLduttnks4lsDnceHOYywxkJpAaTeYlox/vsPC\nwB4Wi5n/xgnHknpImEazAHESFltViYXMdw6PTgLws60H+NArj0Mkvwt5NoYCEd753c0cnYjww+vP\nceQ11Hs9qeR7vrNcLJr8HtwumdZIGYzG8de4UqXJ+WCFxSwJmKkGSt3jotEsNDJeIUTkLBHptr2+\nRkTuEZGvi0hbZZY3fzg8ZhiXvUMhtuwZLtlxx0Ixrr51CwdHQtz67rM4fXmro/3qfZ6Ul2EYl/zD\nWCJCc23NtEbKQCReUI+LtSaYCq0NjOvufI1moZLt9vM7QBRARC4EvgDcAYwBt5R/afOLw6Nhupv8\nNPg83Ln1QEmOGYjEufb7W+gdCPCdqzdy7up2x/vWe91EE0mz8TJecB9JywwJmFABUyhTa/JZmmeW\n52J25+uwmEaz4MhmXNxKKesW/O3ALUqpXyil/hkofVJhnnN4dJKVi+p4w6lLuO+5I0yEZ0vV58Nk\nNMF7b3uc5w6N8Y2/PZ1XrOvIa3+7MnKhYTEwkvpWIQEYpciFGhefx43X40pNoxxMdefrsJhGs9DI\nalxExLqKXAw8ZHuvsKuLiYj8XxF5VkSeFpE/iMgS23ufFpFeEdluVqZZ288UkefM974uZlJDRHwi\ncqe5fbOIrCxmbYVyZHSSJS21vP2sZYRjSX79zJGCjxWJJ7jhB1t5fO8wX337abz6pO7cO83A8hKC\n0QShPEcc22mprZlVilxIj4tFo29KX2xgIoLP46LJX9Sfk0ajmYNkMy4/AR4WkXuASeARABFZgxEa\nK4YvKaVOUUqdBvwv8C/msU8ErgJOAi4DviUi1pXs28D7gLXm4zJz+3XAiFJqDfBV4ItFri1v4okk\nffEkQgQAABKFSURBVONhlrbUcmpPM+u7GosKjf3Pg708svMoX3zLKVx+6pLcO6ShzjbqeLIIz6Wl\nzptq3gQjoV9I/sbCPjBsYDxMZ5OvpMUPGo1mbpDRuCilbgY+AdwGXGCT3HcBf1fMSZVS47aX9UzJ\ny1wB/FQpFVFK7QF6gbNFZDHQpJTaZK7jDuCNtn1uN5//HLhYKny16p+IkFSwuLkWEeHKjT08c2CU\n7X0TBR3vpSPjnLC4ibdtXFbwmhpsyfNQNF6EcZme0A8WOOLYvi6756IrxTSahUnWelLzYn63Uipo\n27ZDKfVksScWkZtF5ADwTkzPBViKMT/G4qC5ban5fOb2afsopeIYXlXazLeI3CAiW0Vk6+DgYLFf\nIcURswx5SYtxoXzzGT3UuIW7CvRe+sbDdBeZ5LaMSTCSYDKaoLamMIPQWldDKJogEk+kjldfRFjM\n3n8zoMcbazQLlvybFRwiIg+IyPNpHlcAKKU+q5RaBvwIuLFc67CjlLpFKbVRKbWxoyO/BHk2DpnG\nZWlLLQBt9V4uPbGLu586RDSezPt4/eNhR42S2UiV/UbjTMaKC4uBURINhudSTFis0Tc9LKal9jWa\nhUnZjItS6hKl1IY0j3tmfPRHGGKYAIcAeyyox9x2yHw+c/u0fcwChGZgqLTfJjuHR42S2sWmcQG4\ncuMyhoNRHnypP69jReNJjgaidBU59rd+WlisGONidOmPhGIopQhGiwyLmTmXcCzBeDiuPReNZoFS\nNuOSDRFZa3t5BbDNfH4vcJVZAbYKI3G/RSl1BBgXkXPNfMo1wD22fa41n78VeKjSI5mPjE3S5PdM\nu+heuLaDxc3+vBP7Vu9Hd9HGxTAmE+E4kXiy4D6XlL5YKEo4liSpChOttLByLoO6O1+jWdBUqwb0\nCyKyHkgC+4APACilXhCRu4AXMXTMPqyUSpj7fAijuKAW+K35ALgV+IGI9ALDGNVmFeWwWYZsx+0S\n3npmD9/8Yy9HxiZZ3FybYe/p9Jtjf7uKDYuZoauhgHERL9RzsUQyR0KxVDirmJxLg9/DRCSeMqJ6\nlotGszCpinFRSr0ly3s3Azen2b4V2JBmexi4sqQLzJNDo+FZxgXgyjOX8T8P9fKLJw5y46vWptlz\nNn1jhjEo1nOprXEjAoMBo9KrtsA8iTUwbGwymtIqK1T+BaDB6yEaT3JwxMhT6bCYRrMwqUpYbKFx\nZGwyVSlmZ3l7HeetbueurQdJJp1F6izPpVjj4nIJdTXuVPiprsAmyta6dJ5LcTkXgD1HjQJEHRbT\naBYm2rgUSTASZzQUS+u5ALztrB72D4fYtMdZjUH/eBivx5VKpBdDvc/DUTMsVmjOpbbGjdftYiQU\nTQlhFtvnArB7MIjbJbSbnpFGo1lYaONSJEdMNeQlGXIqr9mwGJcY816c0DcepqtEXev1Pk/KcynU\nuIgILXU1jIViU4PCipF/sXkuixq8uPKcMaPRaOYH2rgUiVWGnMlz8de4WdxcyyEzx5CLvrFw0SEx\ni3qfO+W5FBoWg6ku/aCZcynOczE8st2DAR0S02gWMNq4FMnhGd356VjaWptKYOeifzxcdI+LRZ3X\nQ8Rs4iym8bGlzsuozXMpRc4lGE3oZL5Gs4DRxqVIDo9OIkJWg9DTWsvBkVDOYymlTOmX0hgXu4dR\naFgMjKT+aChGoIgRx1NrmtpXz3HRaBYu2rgUyeGxMF2NfmqyjP3taa2jbzycUwpmfNKYKV+s9IuF\nvbel0D4XgJZaY6ZLqBSei2+qUEHPcdFoFi7auBSJ0UCZ/SLZ01pLUhn5lGz0m42FpQqL2T2XooxL\nfY1RihyN4/W4shrSnGuyzW7RYTGNZuGijUuRHB6dnKYplo6eVuP9XKExy/iUMudiUUxYrKXWa2ie\nTUSLComBUVhgFcJp46LRLFy0cSkCpRSHx8IpNeRMLGutA8iZ1O8rUQOlhZXfcLsEbxHehtVIeWg0\nVFRIDIzmzgbT6HWW6HtqNJq5hzYuRTAUjBKNJ1mSI0fS3ezHJbk9l37TcylVorvONASGFEzh/SSW\n7P6h0cmiypAtrNCYltvXaBYu2rgUgVWGnCssVuN2sbg5dzly33iY1rqagufdz8TyMooJicGU7P6R\n0XDRnot9XR0N2rhoNAsVbVyKwGqgzBUWA2e9LqXscYGpkuFikvkwJbsfT6qijwVGoUFrXQ1ej/7z\n02gWKvp/dxFMNVDmNi5Oel36SjCB0o6V0K8t0hOy65yVIizWVFtTUiOq0WjmHt
Wa57IgODw6ic/j\nSiW8s2H0uhhjjzPdsfePR9iwpLlk67MMQbHehjXTBYrrcbH4h1evIxzLf/yzRqOZP2jjUgRHzEox\nJ8lye6/L8va6We/HEkmOBiIlvaO3BCaLkX4BQx+ttsbNZCxRdCkywCk9LUUfQ6PRzG10WKwIDqWZ\nQJmJXL0ugxMRlCpdjwtMeS7FJvRhqhy5FJ6LRqNZ+GjjUgSHRydZ7DBHkqvXJdXj0ly6Cqq6EiX0\nYaocWRsXjUbjBG1cCiQaTzIYiDj2XHL1uvSXuDsfSpdzgamkfinCYhqNZuGjjUuB9I+HUcpZGTLk\n7nUpdXc+2KvFivc2WrXnotFo8kAblwI5lGqgdG4MsvW69I2H8bpdtJVw7K/X42Jlex2rO+qLPlaz\n6bmUohRZo9EsfPSVokBS440dei5gJPU3ZRh3PDAeobNE443t/On/XFSS4+iEvkajyQftuRRIarxx\ncz7GJfNcl1KONy4HU2ExnXPRaDS50calQA6NTtJaV5NXmW+2uS6lln4pNbpaTKPR5IM2LgVyJI8e\nF4tMvS7WeOO5bFwuOaGTj1+6jrWdjdVeikajmQdo41Igh0fDeRuXTL0uE5E4oWiipD0upaalzstH\nLl6L21XanJBGo1mYaONSIIdHJ3POcZlJpl6XcvS4aDQaTTXRxqUAxsMxJiLxvD2XTL0u5ehx0Wg0\nmmqijUsBHLEqxfI0LpC+16V/PAJQUrl9jUajqSbauBTA1ByX/I1Burku/eM6LKbRaBYW2rgUwOEC\nGigt0vW69I2FaSnheGONRqOpNtq4FMDh0UncLqGzsTDPZWavS994mK4CjqXRaDRzFW1cCuDwqNFN\nX0hZbrpel/7xMF0636LRaBYQ2rgUwOHRyYLyLZC+18WQfpm7PS4ajUaTL9q4FMDhsUkW56EpZmdm\nr0vcHG+sy5A1Gs1CQhuXPEkmFX1j+XfnW8zsdTkaiJJU6LCYRqNZUGjjkidHAxFiCcXSAsNiML3X\nRTdQajSahUhVjYuIfEJElIgssm37tIj0ish2Efkb2/YzReQ5872vizn4RER8InKnuX2ziKws55pT\nQ8IKDIvB9F6XPi39otFoFiBVMy4isgx4NbDftu1E4CrgJOAy4FsiYjV/fBt4H7DWfFxmbr8OGFFK\nrQG+CnyxnOs+XER3voW910U3UGo0moVINT2XrwKfBJRt2xXAT5VSEaXUHqAXOFtEFgNNSqlNSikF\n3AG80bbP7ebznwMXS6nHOdqwJlAuLcq4TPW69I2HqXEL7SUcb6zRaDTVpirGRUSuAA4ppZ6Z8dZS\n4IDt9UFz21Lz+czt0/ZRSsWBMaA9w3lvEJGtIrJ1cHCwoLWfd1w7//L6E2mqLXxoVo9pmA6Ohugf\nC9PZ6Melpew1Gs0ComxjBUXkAaA7zVufBT6DERKrKEqpW4BbADZu3KhyfDwtJy1p5qQlzUWto8fW\n62IMCdM9LhqNZmFRNuOilLok3XYRORlYBTxjRq96gCdF5GzgELDM9vEec9sh8/nM7dj2OSgiHqAZ\nGCrdNyk9U70uk/SPh1nfrac7ajSahUXFw2JKqeeUUp1KqZVKqZUYIa4zlFJ9wL3AVWYF2CqMxP0W\npdQRYFxEzjXzKdcA95iHvBe41nz+VuAhMy8zZ/F6XHQ3+Tk4EqJ/PKKT+RqNZsFRNs+lEJRSL4jI\nXcCLQBz4sFIqYb79IeA2oBb4rfkAuBX4gYj0AsMY1WZznp7WOrb3TRCIxHWPi0ajWXBU3biY3ov9\n9c3AzWk+txXYkGZ7GLiyXOsrFz2ttfzqaSOyp4eEaTSahYbu0K8SVjkyUJB0v0aj0cxltHGpElbF\nGGjPRaPRLDy0cakS1lwX0LpiGo3m/7V3bzF2VXUcx78/hzadUKkttxjagkTSUBWnikpCYypGUyjh\nooaCEHkzJCRgAmolJrQmfeFBDdEHDTbWgG1IhFo1MVRoUo3KvRYKGImpl4odG0VEDWr9+bDXke2k\nTOeyzhy7z++TTM4+a58zZ/3azPxn7cta3ZPiMiC9kctJC05gdH6WN46IbklxGZDevS45JBYRXZTi\nMiC9e11yj0tEdNHAL0UeZp9cu4IlJ2bql4jonhSXAbpy1dJjvygi4jiUw2IREVFdiktERFSX4hIR\nEdWluERERHUpLhERUV2KS0REVJfiEhER1aW4REREdfo/XxG4byT9AfjVDN9+CnC4YneOF8OaG4Y3\ne3IPl6nkPtP2qcf6RkNbXGZD0mO2zx90P+basOaG4c2e3MOlZu4cFouIiOpSXCIioroUl5n56qA7\nMCDDmhuGN3tyD5dquXPOJSIiqsvIJSIiqktxiYiI6lJcpknSWkk/l/S8pA2D7k+/SNoiaVzS0622\nJZJ2SfpFeVw8yD72g6RlknZLekbSfkk3l/ZOZ5e0QNIjkn5Wcm8q7Z3O3SNpRNKTkr5bnnc+t6QD\nkp6StFfSY6WtWu4Ul2mQNAJ8GbgYWAlcI2nlYHvVN18H1k5o2wA8aPsc4MHyvGv+BdxieyVwAXBj\n+T/uevZXgItsvx0YA9ZKuoDu5+65GXi29XxYcr/P9ljr3pZquVNcpufdwPO2f2n7H8B24PIB96kv\nbO8B/jih+XJga9neClwxp52aA7ZfsP1E2f4LzS+cM+h4djdeLk/nlS/T8dwAkpYC64C7Ws2dz/0a\nquVOcZmeM4DftJ7/trQNi9Ntv1C2fw+cPsjO9Juks4BVwMMMQfZyaGgvMA7ssj0UuYEvAp8C/t1q\nG4bcBn4g6XFJHy9t1XKfMNvexXCybUmdvY5d0kLgW8AnbL8k6b/7uprd9hFgTNIbgPslvXXC/s7l\nlnQpMG77cUlrjvaaLuYuVts+KOk0YJek59o7Z5s7I5fpOQgsaz1fWtqGxSFJbwQoj+MD7k9fSJpH\nU1jusX1faR6K7AC2XwR205xz63ruC4HLJB2gOcx9kaS76X5ubB8sj+PA/TSH/avlTnGZnkeBcyS9\nSdJ84Gpg54D7NJd2AteX7euBbw+wL32hZojyNeBZ259v7ep0dkmnlhELkkaBDwDP0fHctj9je6nt\ns2h+nh+yfR0dzy3pREmv720DHwSepmLu3KE/TZIuoTlGOwJssb15wF3qC0nbgDU0U3AfAm4HdgD3\nAstpliu4yvbEk/7HNUmrgR8CT/HqMfjbaM67dDa7pPNoTuCO0PzRea/tz0k6mQ7nbiuHxW61fWnX\nc0s6m2a0As3pkW/a3lwzd4pLRERUl8NiERFRXYpLRERUl+ISERHVpbhERER1KS4REVFdikvEFEk6\nUmaQ7X1NOqmfpBskfazC5x6QdMpsv0/EXMqlyBFTJOll2wsH8LkHgPNtH57Be+cD82z/tXrHIiaR\nkUvELJWRxR1lbYxHJL25tG+UdGvZvqmsEbNP0vbStkTSjtL203IjI5JOlvRAWVflLkCtz7qufMZe\nSV8py0BMZjGwv7z2XX35B4g4ihSXi
KkbnXBYbH1r359tvw34Es0MDhNtAFbZPg+4obRtAp4sbbcB\n3yjttwM/sv0WmruolwNIOhdYD1xoeww4Alw7WYdtHwJW0MwVtrksiHWTpCXTTh8xDZkVOWLq/l5+\nqR/NttbjF46yfx9wj6QdNNPoAKwGPgxg+6EyYjkJeC/wodL+PUl/Kq9/P/BO4NEyS/MoU5hY0PYr\nNJMybpe0nKYA3iHpbNu/O9b7I2YiI5eIOvwa2z3raFYxfQdNcZjJH3YCtpaVA8dsr7C98X9eIL2n\nNbK6rNV+mqRbgO/QzB/2UZo54yL6IsUloo71rceftHdIeh2wzPZu4NPAImAhzQSZ15bXrAEO234J\n2EPzyx9JF9OcN4Fm2dmPlPU3eudszmx/lu2HW8Vnp6RFZbS0B1gAXGJ7ne37yvotEX2Rw2IRUzda\nVmrs+b7t3uXIiyXto1mL/poJ7xsB7pa0iGb0caftFyVtBLaU9/2NV6c63wRsk7Qf+DHwawDbz0j6\nLPBAKVj/BG6kmb12MncCu51LQ2MO5VLkiFmazaXCEV2Vw2IREVFdRi4REVFdRi4REVFdiktERFSX\n4hIREdWluERERHUpLhERUd1/ADs49kBwVR0AAAAAAElFTkSuQmCC\n",
100 | "text/plain": [
101 | ""
102 | ]
103 | },
104 | "metadata": {},
105 | "output_type": "display_data"
106 | },
107 | {
108 | "name": "stdout",
109 | "output_type": "stream",
110 | "text": [
111 | "Mean reward over the last 5 episodes: -159.127544086\n"
112 | ]
113 | }
114 | ],
115 | "source": [
116 | "plot_rewards([-random()*i*100 for i in range(50)][::-1])"
117 | ]
118 | }
119 | ],
120 | "metadata": {
121 | "kernelspec": {
122 | "display_name": "Python 3",
123 | "language": "python",
124 | "name": "python3"
125 | },
126 | "language_info": {
127 | "codemirror_mode": {
128 | "name": "ipython",
129 | "version": 3
130 | },
131 | "file_extension": ".py",
132 | "mimetype": "text/x-python",
133 | "name": "python",
134 | "nbconvert_exporter": "python",
135 | "pygments_lexer": "ipython3",
136 | "version": "3.6.1"
137 | }
138 | },
139 | "nbformat": 4,
140 | "nbformat_minor": 2
141 | }
142 |
--------------------------------------------------------------------------------
/notebooks/images/quadcopter_tumble.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llSourcell/Drone_Flight_Controller/21bf258de17810bd249f0efb114880fe325c8bf3/notebooks/images/quadcopter_tumble.mp4
--------------------------------------------------------------------------------
/notebooks/images/quadcopter_tumble.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llSourcell/Drone_Flight_Controller/21bf258de17810bd249f0efb114880fe325c8bf3/notebooks/images/quadcopter_tumble.png
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>quad_controller_rl</name>
4 |   <version>0.0.0</version>
5 |   <description>Learns to control a quadcopter using Reinforcement Learning. Based on quad_controller from Udacity/RoboND.</description>
6 |
7 |   <license>CC BY-NC-ND 2.0</license>
8 |   <url type="website">https://www.udacity.com</url>
9 |
10 |   <maintainer>Arpan Chakraborty</maintainer>
11 |   <author>Arpan Chakraborty</author>
12 |
13 |   <buildtool_depend>catkin</buildtool_depend>
14 |
15 |   <build_depend>message_generation</build_depend>
16 |   <build_depend>std_msgs</build_depend>
17 |   <build_depend>geometry_msgs</build_depend>
18 |   <build_depend>std_srvs</build_depend>
19 |
20 |   <run_depend>rospy</run_depend>
21 |   <run_depend>message_runtime</run_depend>
22 |   <run_depend>std_msgs</run_depend>
23 |   <run_depend>geometry_msgs</run_depend>
24 |   <run_depend>std_srvs</run_depend>
25 |
26 | </package>
--------------------------------------------------------------------------------
/scripts/drone_sim:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Launch simulator
4 | me=`basename "$0"`
5 | echo `pwd`
6 | sim=${1:-DroneSim} # default path to executable
7 | if [[ -x ${sim} ]]
8 | then
9 | echo "[$me] Starting simulator: $sim"
10 | $sim
11 | else
12 | echo "[$me] Simulator not found/not an executable: $sim" >&2
13 | echo "[$me] Start manually or specify on command line: sim:=" >&2
14 | fi
15 |
--------------------------------------------------------------------------------
/scripts/rl_controller_node:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """ROS node for Reinforcement Learning controllers."""
4 |
5 | import time
6 | from threading import Lock
7 |
8 | import rospy
9 | from geometry_msgs.msg import PoseStamped, Twist, Wrench
10 | from sensor_msgs.msg import Imu
11 | from std_srvs.srv import SetBool
12 |
13 | from quad_controller_rl.tasks.base_task import BaseTask
14 | from quad_controller_rl.agents.base_agent import BaseAgent
15 | from quad_controller_rl.srv import SetPose
16 |
17 | def get_subclass(cls, name):
18 | """Return a concrete subclass by name (classmethod)."""
19 | types = {t.__name__: t for t in cls.__subclasses__()}
20 | assert name in types, "Unknown type '{}' (must be a subclass of {})".format(name, cls.__name__)
21 | return types[name]
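22 |
23 | # Example (illustrative names): get_subclass(BaseTask, 'Hover') would return the
24 | # Hover class from quad_controller_rl.tasks; the 'task'/'agent' ROS params below
25 | # select which concrete subclass gets instantiated.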
22 |
23 | class RLControllerNode:
24 | def __init__(self, name='rl_controller', num_episodes=10000, update_rate=30, transient_period=0.1):
25 | # Set ROS node and environment params, initialize node
26 | self.state = 'INIT' # one of ['INIT', 'RESET', 'EPISODE']; to keep track of current state
27 | self.name = name # to identify this ROS node
28 | self.num_episodes = num_episodes # max no. of episodes to run for
29 | self.update_rate = update_rate # Hz; rate at which task is updated
30 | self.transient_period = transient_period # secs; period to wait for episode reset requests to settle
31 | self.sub_callback_lock = Lock() # mutex/lock for subscriber callback
32 | rospy.init_node(self.name)
33 |
34 | # Initialize task
35 | task_name = rospy.get_param('task') # read from launch/rl_controller.launch or commandline task:=
36 | rospy.loginfo("RLControllerNode[{}]: task = {}".format(self.state, task_name)) # [debug]
37 | self.task = get_subclass(BaseTask, task_name)()
38 |
39 | # Initialize agent and pass on to task
40 | agent_name = rospy.get_param('agent') # read from launch/rl_controller.launch or commandline agent:=
41 | rospy.loginfo("RLControllerNode[{}]: agent = {}".format(self.state, agent_name)) # [debug]
42 | self.agent = get_subclass(BaseAgent, agent_name)(self.task) # needs a reference to task
43 | self.task.set_agent(self.agent)
44 |
45 | # Other variables
46 | self.episode = 0
47 |
48 | def start(self):
49 | # Wait till quad_rotor services are ready
50 | rospy.wait_for_service('/quad_rotor/reset_orientation')
51 | rospy.wait_for_service('/quad_rotor/set_pose')
52 |
53 | # Set pub-sub topics and callbacks
54 | self.pose_sub = rospy.Subscriber("/quad_rotor/pose", PoseStamped, self.sub_callback, queue_size=1) # latest message only
55 | self.imu_sub = rospy.Subscriber("/quad_rotor/imu", Imu, self.sub_callback, queue_size=1) # latest message only
56 | self.cmd_vel_pub = rospy.Publisher("/quad_rotor/cmd_vel", Twist, queue_size=1)
57 | self.cmd_force_pub = rospy.Publisher("/quad_rotor/cmd_force", Wrench, queue_size=10)
58 |
59 | # Reset to begin first episode
60 | self.reset()
61 |
62 | # Loop indefinitely
63 | self.loop()
64 |
65 | def loop(self):
66 | # Run till shutdown is signalled
67 | rate = rospy.Rate(self.update_rate) # update task periodically
68 | while not rospy.is_shutdown():
69 | rate.sleep() # handle events, callbacks
70 |
71 | # Update task, get control action (force command) and whether this episode is done
72 | if self.timestamp is not None and self.pose is not None and self.angular_velocity is not None and self.linear_acceleration is not None: # explicit None checks: timestamp is 0.0 (falsy) on the first update
73 | rospy.logdebug("RLControllerNode[{}]: t = {}".format(self.state, self.timestamp))
74 | cmd, done = self.task.update(self.timestamp, self.pose, self.angular_velocity, self.linear_acceleration)
75 | if done:
76 | # If done, reset to begin next episode
77 | self.reset()
78 | elif cmd is not None:
79 | # Else, execute control action
80 | self.cmd_force_pub.publish(cmd)
81 |
82 | def reset(self):
83 | self.state = 'RESET'
84 |
85 | # Reset task and set initial conditions
86 | initial_pose, initial_velocity = self.task.reset()
87 |
88 | reset_orientation = rospy.ServiceProxy('/quad_rotor/reset_orientation', SetBool)
89 | reset_orientation(True) # reset orientation, velocity, force
90 |
91 | set_pose = rospy.ServiceProxy('/quad_rotor/set_pose', SetPose)
92 | set_pose(initial_pose) # set initial pose
93 |
94 | if initial_velocity is not None:
95 | self.cmd_vel_pub.publish(initial_velocity) # set initial velocity
96 |
97 | time.sleep(self.transient_period) # wait for reset requests to settle
98 |
99 | # Start a new episode (None values will be initialized on first callback)
100 | self.start_time = None
101 | self.timestamp = None
102 | self.pose = None
103 | self.angular_velocity = None
104 | self.linear_acceleration = None
105 | self.episode += 1
106 | if self.episode > self.num_episodes:
107 | shutdown_reason = "RLControllerNode[{}]: Finished {} episodes".format(self.state, self.episode - 1)
108 | rospy.loginfo(shutdown_reason) # [debug]
109 | rospy.signal_shutdown(shutdown_reason)
110 | return
111 | rospy.loginfo("RLControllerNode[{}]: Episode {}".format(self.state, self.episode)) # [debug]
112 |
113 | self.state = 'EPISODE'
114 |
115 | def sub_callback(self, msg):
116 | # Use a mutex/lock so that only one message is processed at a time (drop others)
117 | if self.sub_callback_lock.acquire(blocking=False):
118 | try:
119 | if self.state == 'EPISODE': # do nothing in other states
120 | # Initialize start time, once per episode
121 | if self.start_time is None:
122 | self.start_time = msg.header.stamp
123 |
124 | # Extract relevant components from message
125 | self.timestamp = (msg.header.stamp - self.start_time).to_sec() # in secs, relative to start time
126 | if msg._type == 'geometry_msgs/PoseStamped':
127 | self.pose = msg.pose
128 | elif msg._type == 'sensor_msgs/Imu':
129 | self.angular_velocity = msg.angular_velocity
130 | self.linear_acceleration = msg.linear_acceleration
131 | else:
132 | rospy.logwarn("RLControllerNode[{}]: Unknown message type: '{}'".format(self.state, msg._type))
133 | finally:
134 | self.sub_callback_lock.release()
135 |
136 |
137 | if __name__ == '__main__':
138 | try:
139 | node = RLControllerNode()
140 | node.start()
141 | except rospy.ROSInterruptException:
142 | pass
143 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
2 |
3 | from distutils.core import setup
4 | from catkin_pkg.python_setup import generate_distutils_setup
5 |
6 | # fetch values from package.xml
7 | setup_args = generate_distutils_setup(
8 | packages=['quad_controller_rl'],
9 | package_dir={'': 'src'})
10 |
11 | setup(**setup_args)
12 |
--------------------------------------------------------------------------------
/sim/README:
--------------------------------------------------------------------------------
1 | (Optional) To launch simulator automatically:
2 | - Save the simulator for your platform here (or symlink it). Must be an executable named "DroneSim".
3 | - Or, specify full path in command-line arg to roslaunch, e.g.:
4 | roslaunch quad_controller_rl rl_controller.launch sim:=/home/my_user/my_sim_dir/DroneSim.x86_64
5 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/agents/__init__.py:
--------------------------------------------------------------------------------
1 | from quad_controller_rl.agents.base_agent import BaseAgent
2 | from quad_controller_rl.agents.policy_search import RandomPolicySearch
3 | from quad_controller_rl.agents.policy_gradients import DDPG
4 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/agents/base_agent.py:
--------------------------------------------------------------------------------
1 | """Generic base class for reinforcement learning agents."""
2 |
3 | class BaseAgent:
4 | """Generic base class for reinforcement reinforcement agents."""
5 |
6 | def __init__(self, task):
7 | """Initialize policy and other agent parameters.
8 |
9 | Should be able to access the following (OpenAI Gym spaces):
10 | task.observation_space # i.e. state space
11 | task.action_space
12 | """
13 | pass
14 |
15 | def step(self, state, reward, done):
16 | """Process state, reward, done flag, and return an action.
17 |
18 | Params
19 | ======
20 | - state: current state vector as NumPy array, compatible with task's state space
21 | - reward: last reward received
22 | - done: whether this episode is complete
23 |
24 | Returns
25 | =======
26 | - action: desired action vector as NumPy array, compatible with task's action space
27 | """
28 | raise NotImplementedError("{} must override step()".format(self.__class__.__name__))
29 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/agents/policy_gradients.py:
--------------------------------------------------------------------------------
1 | """Policy search agent."""
2 |
3 | import time
4 | import random
5 | from collections import deque
6 |
7 | import numpy as np
8 | import tensorflow as tf
9 | import gym
10 | from quad_controller_rl.agents.base_agent import BaseAgent
11 |
12 | class DDPG(BaseAgent):
13 | """Sample agent that searches for optimal policy deterministically."""
14 |
15 | def __init__(self, task):
16 | # Task (environment) information
17 | self.task = task
18 | state_dim = int(np.prod(self.task.observation_space.shape))
19 | # self.action_dim = int(np.prod(self.task.action_space.shape))
20 |
21 | # use only the z-force component of the action space
22 | self.action_dim = 1
23 | self.action_space_low = self.task.action_space.low[2] # index 2 = z-force, the component actually commanded in step()
24 | self.action_space_high = self.task.action_space.high[2]
25 |
26 | # seed NumPy's RNG for reproducibility
27 | np.random.seed(0)
28 |
29 |
30 | # Network parameters
31 | gamma = 0.99
32 | h1_actor = 8
33 | h2_actor = 8
34 | h3_actor = 8
35 | h1_critic = 8
36 | h2_critic = 8
37 | h3_critic = 8
38 | lr_actor = 1e-3
39 | lr_critic = 1e-3
40 | lr_decay = 1
41 | l2_reg_actor = 1e-6
42 | l2_reg_critic = 1e-6
43 | dropout_actor = 0
44 | dropout_critic = 0
45 | tau = 1e-2
46 |
47 | self.train_every = 1
48 | self.minibatch_size = 1024
49 | self.initial_noise_scale = 10
50 | self.noise_decay = 0.99
51 | self.exploration_mu = 0.0
52 | self.exploration_theta = 0.15
53 | self.exploration_sigma = 0.2
54 | self.ep = 0
55 | self.total_steps = 0
56 | self.log_file = open("log_file" + str(time.time()))
57 |
58 |
59 | replay_memory_capacity = int(1e5) # capacity of experience replay memory
60 | self.replay_memory = deque(maxlen=replay_memory_capacity) # bounded deque evicts the oldest experiences in O(1)
61 |
62 | ## Tensorflow
63 |
64 | tf.reset_default_graph()
65 |
66 | # placeholders
67 | self.state_ph = tf.placeholder(dtype=tf.float32, shape=[None,state_dim])
68 | self.action_ph = tf.placeholder(dtype=tf.float32, shape=[None,self.action_dim])
69 | self.reward_ph = tf.placeholder(dtype=tf.float32, shape=[None])
70 | self.next_state_ph = tf.placeholder(dtype=tf.float32, shape=[None,state_dim])
71 | self.is_not_terminal_ph = tf.placeholder(dtype=tf.float32, shape=[None]) # indicators (go into target computation)
72 | self.is_training_ph = tf.placeholder(dtype=tf.bool, shape=()) # for dropout
73 |
74 | # episode counter
75 | episodes = tf.Variable(0.0, trainable=False, name='episodes')
76 | self.episode_inc_op = episodes.assign_add(1)
77 |
78 | # will use this to initialize both the actor network and its slowly-changing target network with the same structure
79 | def generate_actor_network(s, trainable, reuse):
80 | hidden = tf.layers.dense(s, h1_actor, activation = tf.nn.relu, trainable = trainable, name = 'dense', reuse = reuse)
81 | hidden_drop = tf.layers.dropout(hidden, rate = dropout_actor, training = trainable & self.is_training_ph)
82 | hidden_2 = tf.layers.dense(hidden_drop, h2_actor, activation = tf.nn.relu, trainable = trainable, name = 'dense_1', reuse = reuse)
83 | hidden_drop_2 = tf.layers.dropout(hidden_2, rate = dropout_actor, training = trainable & self.is_training_ph)
84 | hidden_3 = tf.layers.dense(hidden_drop_2, h3_actor, activation = tf.nn.relu, trainable = trainable, name = 'dense_2', reuse = reuse)
85 | hidden_drop_3 = tf.layers.dropout(hidden_3, rate = dropout_actor, training = trainable & self.is_training_ph)
86 | actions_unscaled = tf.layers.dense(hidden_drop_3, self.action_dim, trainable = trainable, name = 'dense_3', reuse = reuse)
87 | actions = self.action_space_low + tf.nn.sigmoid(actions_unscaled)*(self.action_space_high - self.action_space_low) # bound the actions to the valid range
88 | return actions
89 |
90 | # actor network
91 | with tf.variable_scope('actor'):
92 | # Policy's outputted action for each self.state_ph (for generating actions and training the critic)
93 | self.actions = generate_actor_network(self.state_ph, trainable = True, reuse = False)
94 |
95 | # slow target actor network
96 | with tf.variable_scope('slow_target_actor', reuse=False):
97 | # Slow target policy's outputted action for each self.next_state_ph (for training the critic)
98 | # use stop_gradient to treat the output values as constant targets when doing backprop
99 | slow_target_next_actions = tf.stop_gradient(generate_actor_network(self.next_state_ph, trainable = False, reuse = False))
100 |
101 | # will use this to initialize both the critic network and its slowly-changing target network with the same structure
102 | def generate_critic_network(s, a, trainable, reuse):
103 | state_action = tf.concat([s, a], axis=1)
104 | hidden = tf.layers.dense(state_action, h1_critic, activation = tf.nn.relu, trainable = trainable, name = 'dense', reuse = reuse)
105 | hidden_drop = tf.layers.dropout(hidden, rate = dropout_critic, training = trainable & self.is_training_ph)
106 | hidden_2 = tf.layers.dense(hidden_drop, h2_critic, activation = tf.nn.relu, trainable = trainable, name = 'dense_1', reuse = reuse)
107 | hidden_drop_2 = tf.layers.dropout(hidden_2, rate = dropout_critic, training = trainable & self.is_training_ph)
108 | hidden_3 = tf.layers.dense(hidden_drop_2, h3_critic, activation = tf.nn.relu, trainable = trainable, name = 'dense_2', reuse = reuse)
109 | hidden_drop_3 = tf.layers.dropout(hidden_3, rate = dropout_critic, training = trainable & self.is_training_ph)
110 | q_values = tf.layers.dense(hidden_drop_3, 1, trainable = trainable, name = 'dense_3', reuse = reuse)
111 | return q_values
112 |
113 | with tf.variable_scope('critic') as scope:
114 | # Critic applied to self.state_ph and a given action (for training critic)
115 | q_values_of_given_actions = generate_critic_network(self.state_ph, self.action_ph, trainable = True, reuse = False)
116 | # Critic applied to self.state_ph and the current policy's outputted actions for self.state_ph (for training actor via deterministic policy gradient)
117 | q_values_of_suggested_actions = generate_critic_network(self.state_ph, self.actions, trainable = True, reuse = True)
118 |
119 | # slow target critic network
120 | with tf.variable_scope('slow_target_critic', reuse=False):
121 | # Slow target critic applied to slow target actor's outputted actions for self.next_state_ph (for training critic)
122 | slow_q_values_next = tf.stop_gradient(generate_critic_network(self.next_state_ph, slow_target_next_actions, trainable = False, reuse = False))
123 |
124 | # isolate vars for each network
125 | actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
126 | slow_target_actor_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='slow_target_actor')
127 | critic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
128 | slow_target_critic_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='slow_target_critic')
129 |
130 | # update values for slowly-changing targets towards current actor and critic
131 | update_slow_target_ops = []
132 | for i, slow_target_actor_var in enumerate(slow_target_actor_vars):
133 | update_slow_target_actor_op = slow_target_actor_var.assign(tau*actor_vars[i]+(1-tau)*slow_target_actor_var)
134 | update_slow_target_ops.append(update_slow_target_actor_op)
135 |
136 | for i, slow_target_var in enumerate(slow_target_critic_vars):
137 | update_slow_target_critic_op = slow_target_var.assign(tau*critic_vars[i]+(1-tau)*slow_target_var)
138 | update_slow_target_ops.append(update_slow_target_critic_op)
139 |
140 | self.update_slow_targets_op = tf.group(*update_slow_target_ops, name='update_slow_targets')
141 |
142 | # One step TD targets y_i for (s,a) from experience replay
143 | # = r_i + gamma*Q_slow(s',mu_slow(s')) if s' is not terminal
144 | # = r_i if s' terminal
145 | targets = tf.expand_dims(self.reward_ph, 1) + tf.expand_dims(self.is_not_terminal_ph, 1) * gamma * slow_q_values_next
146 |
147 | # 1-step temporal difference errors
148 | td_errors = targets - q_values_of_given_actions
149 |
150 | # critic loss function (mean-square value error with regularization)
151 | critic_loss = tf.reduce_mean(tf.square(td_errors))
152 | for var in critic_vars:
153 | if 'bias' not in var.name:
154 | critic_loss += l2_reg_critic * 0.5 * tf.nn.l2_loss(var)
155 |
156 | # critic optimizer
157 | self.critic_train_op = tf.train.AdamOptimizer(lr_critic*lr_decay**episodes).minimize(critic_loss)
158 |
159 | # actor loss function (mean Q-values under current policy with regularization)
160 | actor_loss = -1*tf.reduce_mean(q_values_of_suggested_actions)
161 | for var in actor_vars:
162 | if 'bias' not in var.name:
163 | actor_loss += l2_reg_actor * 0.5 * tf.nn.l2_loss(var)
164 |
165 | # actor optimizer
166 | # the gradient of the mean Q-values wrt actor params is the deterministic policy gradient (keeping critic params fixed)
167 | self.actor_train_op = tf.train.AdamOptimizer(lr_actor*lr_decay**episodes).minimize(actor_loss, var_list=actor_vars)
168 |
169 | # initialize session
170 | self.sess = tf.Session()
171 | self.sess.run(tf.global_variables_initializer())
172 |
173 |
174 | # Episode variables
175 | self.reset_episode_vars()
176 |
177 |
178 | def add_to_memory(self, experience):
179 | self.replay_memory.append(experience)
180 |
181 | def sample_from_memory(self, minibatch_size):
182 | return random.sample(self.replay_memory, minibatch_size)
183 |
184 |
185 | def reset_episode_vars(self):
186 | self.total_reward = 0
187 | self.steps_in_ep = 0
188 |
189 | # Initialize exploration noise process
190 | self.noise_process = np.zeros(self.action_dim)
191 | self.noise_scale = (self.initial_noise_scale * self.noise_decay**self.ep) * (self.action_space_high - self.action_space_low)
192 | self.ep += 1
193 |
194 | self.last_observation = None
195 | self.last_action = None
196 |
197 | def step(self, observation, reward, done):
198 | # choose action based on deterministic policy
199 | action_for_state, = self.sess.run(self.actions, feed_dict = {
200 | self.state_ph: observation,
201 | self.is_training_ph: False
202 | })
203 |
204 | # add temporally-correlated exploration noise to action (using an Ornstein-Uhlenbeck process)
205 | # print(action_for_state)
206 | self.noise_process = self.exploration_theta*(self.exploration_mu - self.noise_process) + self.exploration_sigma*np.random.randn(self.action_dim)
207 | # print(self.noise_scale*self.noise_process)
208 | action_for_state += self.noise_scale*self.noise_process
209 |
210 | self.total_reward += reward
211 |
212 |
213 | # Save experience / reward
214 | if self.last_observation is not None and self.last_action is not None:
215 | self.add_to_memory((self.last_observation[0], self.last_action, reward, observation[0],
216 | # is next_observation a terminal state?
217 | # 0.0 if done and not env.env._past_limit() else 1.0))
218 | 0.0 if done else 1.0))
219 |
220 | # update network weights to fit a minibatch of experience
221 | if self.total_steps%self.train_every == 0 and len(self.replay_memory) >= self.minibatch_size:
222 |
223 | # grab N (s,a,r,s') tuples from replay memory
224 | minibatch = self.sample_from_memory(self.minibatch_size)
225 |
226 | # update the critic and actor params using mean-square value error and deterministic policy gradient, respectively
227 | _, _ = self.sess.run([self.critic_train_op, self.actor_train_op],
228 | feed_dict = {
229 | self.state_ph: np.asarray([elem[0] for elem in minibatch]),
230 | self.action_ph: np.asarray([elem[1] for elem in minibatch]),
231 | self.reward_ph: np.asarray([elem[2] for elem in minibatch]),
232 | self.next_state_ph: np.asarray([elem[3] for elem in minibatch]),
233 | self.is_not_terminal_ph: np.asarray([elem[4] for elem in minibatch]),
234 | self.is_training_ph: True})
235 |
236 | # update slow actor and critic targets towards current actor and critic
237 | _ = self.sess.run(self.update_slow_targets_op)
238 |
239 |
240 | self.last_observation = observation
241 | self.last_action = action_for_state
242 | self.total_steps += 1
243 | self.steps_in_ep += 1
244 |
245 |
246 | if done:
247 | _ = self.sess.run(self.episode_inc_op)
248 | print('Reward: {:.3f}, Steps: {}, Final noise scale: {:.3f}, Z: {:.3f}'.format(self.total_reward, self.steps_in_ep, self.noise_scale, observation[0][2]))
249 | print(self.ep, self.total_reward)
250 | self.reset_episode_vars()
251 |
252 | return np.array([0,0] + list(action_for_state) + [0,0,0])
253 |
254 |
255 |
--------------------------------------------------------------------------------
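The exploration noise applied in step() above is an Ornstein-Uhlenbeck process, which is easier to see in isolation. Below is a minimal NumPy sketch using the same parameter values as DDPG.__init__ (no TensorFlow required):

    import numpy as np

    # Same values as DDPG.__init__ above
    theta, mu, sigma = 0.15, 0.0, 0.2
    action_dim = 1

    noise = np.zeros(action_dim)
    trace = []
    for _ in range(1000):
        # Mean-reverting drift toward mu plus a Gaussian kick --
        # the same update applied each call in DDPG.step()
        noise = theta * (mu - noise) + sigma * np.random.randn(action_dim)
        trace.append(noise[0])

    # Unlike i.i.d. Gaussian noise, successive samples are temporally
    # correlated, which yields smoother exploratory thrust commands.
    print("mean: {:.3f}, std: {:.3f}".format(np.mean(trace), np.std(trace)))
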
/src/quad_controller_rl/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from quad_controller_rl.tasks.base_task import BaseTask
2 | from quad_controller_rl.tasks.takeoff import Takeoff
3 | from quad_controller_rl.tasks.hover import Hover
4 | from quad_controller_rl.tasks.landing import Landing
5 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/tasks/base_task.py:
--------------------------------------------------------------------------------
1 | """Generic base class for reinforcement learning tasks."""
2 |
3 | from geometry_msgs.msg import Vector3, Point, Quaternion, Pose, Twist, Wrench
4 |
5 | class BaseTask:
6 | """Generic base class for reinforcement learning tasks.
7 |
8 | Concrete subclasses should:
9 | - Specify state and action spaces, initial condition, reward function.
10 | - Call agent on update, when new state is available, and pass back action.
11 | - Convert ROS messages to/from standard NumPy vectors for state and action.
12 | - Check for episode termination.
13 | """
14 |
15 | def __init__(self):
16 | """Define state and action spaces, initialize other task parameters."""
17 | pass
18 |
19 | def set_agent(self, agent):
20 | """Set an agent to carry out this task; to be called from update."""
21 | self.agent = agent
22 |
23 | def reset(self):
24 | """Reset task and return initial condition.
25 |
26 | Called at the beginning of each episode, including the very first one.
27 | Reset/initialize any episode-specific variables/counters/etc.;
28 | then return initial pose and velocity for next episode.
29 |
30 | Returns
31 | =======
32 | tuple: initial_pose, initial_velocity
33 | - initial_pose: Pose object defining initial position and orientation
34 | - initial_velocity: Twist object defining initial linear and angular velocity
35 | """
36 | raise NotImplementedError("{} must override reset()".format(self.__class__.__name__))
37 |
38 | def update(self, timestamp, pose, angular_velocity, linear_acceleration):
39 | """Process current data, call agent, return action and done flag.
40 |
41 | Use current data to prepare state vector (need not use all available data);
42 | compute reward and check for episode termination (done flag); call agent.step()
43 | with state, reward, done to obtain action; pass back action, done.
44 |
45 | Params
46 | ======
47 | - timestamp: current time in seconds since episode started
48 | - pose: Pose object containing current position and orientation
49 | - angular_velocity: Vector3 object, current angular velocity
50 | - linear_acceleration: Vector3 object, current linear acceleration
51 |
52 | Returns
53 | =======
54 | tuple: action, done
55 | - action: Wrench object indicating force and torque to apply
56 | - done: boolean indicating whether this episode is complete
57 | """
58 | raise NotImplementedError("{} must override update()".format(self.__class__.__name__))
59 |
--------------------------------------------------------------------------------
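To see how this contract is consumed, the sketch below wires a task to a trivial agent and starts an episode, roughly what scripts/rl_controller_node does on every incoming sensor message. This is a hypothetical illustration (ZeroAgent is not part of the package) and assumes a sourced ROS environment:

    import numpy as np
    from quad_controller_rl.tasks.hover import Hover
    from quad_controller_rl.agents.base_agent import BaseAgent

    class ZeroAgent(BaseAgent):
        """Hypothetical placeholder agent that applies no force or torque."""
        def __init__(self, task):
            self.task = task
        def step(self, state, reward, done):
            return np.zeros(self.task.action_space.shape)

    task = Hover()
    task.set_agent(ZeroAgent(task))

    # Start of an episode: the task supplies the initial condition
    initial_pose, initial_velocity = task.reset()

    # On each sensor callback the node then calls update() and publishes
    # the returned Wrench; when done is True, reset() begins a new episode:
    # wrench, done = task.update(t, pose, angular_velocity, linear_acceleration)
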
/src/quad_controller_rl/tasks/hover.py:
--------------------------------------------------------------------------------
1 | """Takeoff task."""
2 |
3 | import numpy as np
4 | from gym import spaces
5 | from geometry_msgs.msg import Vector3, Point, Quaternion, Pose, Twist, Wrench
6 | from quad_controller_rl.tasks.base_task import BaseTask
7 |
8 | class Hover(BaseTask):
9 | """Simple task where the goal is to lift off the ground and reach a target height."""
10 |
11 | def __init__(self):
12 | # State space:
13 | cube_size = 300.0 # env is cube_size x cube_size x cube_size
14 | self.observation_space = spaces.Box(
15 | np.array([- cube_size / 2, - cube_size / 2, 0.0, -1.0, -1.0, -1.0, -1.0]),
16 | np.array([ cube_size / 2, cube_size / 2, cube_size, 1.0, 1.0, 1.0, 1.0])
17 | )
18 | #print("Takeoff(): observation_space = {}".format(self.observation_space)) # [debug]
19 |
20 | # Action space:
21 | max_force = 25.0
22 | max_torque = 25.0
23 | self.action_space = spaces.Box(
24 | np.array([-max_force, -max_force, -max_force, -max_torque, -max_torque, -max_torque]),
25 | np.array([ max_force, max_force, max_force, max_torque, max_torque, max_torque])
26 | )
27 | #print("Takeoff(): action_space = {}".format(self.action_space)) # [debug]
28 |
29 | # Task-specific parameters
30 | self.max_duration = 5.0 # secs
31 | self.target_z = 10.0 # target height (z position) to hover at
32 |
33 | def reset(self):
34 | # Nothing to reset; just return initial condition
35 | return Pose(
36 | position=Point(0.0, 0.0, 10.0), # start at the target hover height
37 | orientation=Quaternion(0.0, 0.0, 0.0, 1.0), # identity orientation (w = 1)
38 | ), Twist(
39 | linear=Vector3(0.0, 0.0, 0.0),
40 | angular=Vector3(0.0, 0.0, 0.0)
41 | )
42 |
43 | def update(self, timestamp, pose, angular_velocity, linear_acceleration):
44 | # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
45 | state = np.array([
46 | pose.position.x, pose.position.y, pose.position.z,
47 | pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]).reshape(1, -1)
48 |
49 | # Compute reward / penalty and check if this episode is complete
50 | done = False
51 | reward = -abs(self.target_z - pose.position.z)
52 |
53 | def is_equal(x, y, delta=0.0):
54 | return abs(x-y) <= delta
55 |
56 | if is_equal(self.target_z, pose.position.z, delta=1):
57 | reward += 10.0 # bonus reward
58 | # done = True
59 |
60 | if timestamp > self.max_duration:
61 | # reward -= 10.0 # extra penalty
62 | done = True
63 |
64 |
65 | # Take one RL step, passing in current state and reward, and obtain action
66 | # Note: The reward passed in here is the result of past action(s)
67 | action = self.agent.step(state, reward, done) # note: action = vector
68 |
69 | # Convert to proper force command (a Wrench object) and return it
70 | if action is not None:
71 | action = np.clip(action.flatten(), self.action_space.low, self.action_space.high) # flatten, clamp to action space limits
72 | return Wrench(
73 | force=Vector3(action[0], action[1], action[2]),
74 | torque=Vector3(action[3], action[4], action[5])
75 | ), done
76 | else:
77 | return Wrench(), done
78 |
79 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/tasks/landing.py:
--------------------------------------------------------------------------------
1 | """Takeoff task."""
2 |
3 | import numpy as np
4 | from gym import spaces
5 | from geometry_msgs.msg import Vector3, Point, Quaternion, Pose, Twist, Wrench
6 | from quad_controller_rl.tasks.base_task import BaseTask
7 |
8 | class Landing(BaseTask):
9 | """Simple task where the goal is to lift off the ground and reach a target height."""
10 |
11 | def __init__(self):
12 | # State space:
13 | cube_size = 300.0 # env is cube_size x cube_size x cube_size
14 | self.observation_space = spaces.Box( # pose (7 dims) + linear acceleration (3 dims); accel bounds of +/-50 are a rough assumption
15 | np.array([- cube_size / 2, - cube_size / 2, 0.0, -1.0, -1.0, -1.0, -1.0, -50.0, -50.0, -50.0]),
16 | np.array([ cube_size / 2, cube_size / 2, cube_size, 1.0, 1.0, 1.0, 1.0, 50.0, 50.0, 50.0])
17 | )
18 | #print("Takeoff(): observation_space = {}".format(self.observation_space)) # [debug]
19 |
20 | # Action space:
21 | max_force = 25.0
22 | max_torque = 25.0
23 | self.action_space = spaces.Box(
24 | np.array([-max_force, -max_force, -max_force, -max_torque, -max_torque, -max_torque]),
25 | np.array([ max_force, max_force, max_force, max_torque, max_torque, max_torque])
26 | )
27 | #print("Takeoff(): action_space = {}".format(self.action_space)) # [debug]
28 |
29 | # Task-specific parameters
30 | self.max_duration = 5.0 # secs
31 | self.target_z = 0.0 # target height (z position) for a successful landing
32 |
33 | def reset(self):
34 | # Nothing to reset; just return initial condition
35 | return Pose(
36 | position=Point(0.0, 0.0, 10.0), # start from a height of 10 m
37 | orientation=Quaternion(0.0, 0.0, 0.0, 1.0), # identity orientation (w = 1)
38 | ), Twist(
39 | linear=Vector3(0.0, 0.0, 0.0),
40 | angular=Vector3(0.0, 0.0, 0.0)
41 | )
42 |
43 | def update(self, timestamp, pose, angular_velocity, linear_acceleration):
44 | # Prepare state vector (pose plus linear acceleration; ignore angular_velocity)
45 | state = np.array([
46 | pose.position.x, pose.position.y, pose.position.z,
47 | pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w,
48 | linear_acceleration.x, linear_acceleration.y, linear_acceleration.z]).reshape(1, -1)
49 |
50 | # Compute reward / penalty and check if this episode is complete
51 | done = False
52 | reward = -abs(self.target_z - pose.position.z)
53 | reward += -abs(linear_acceleration.z)
54 | if pose.position.z <= self.target_z + 0.1: # touched down (exact float equality would almost never trigger)
55 | reward += 10.0 # bonus reward
56 | done = True
57 |
58 | if timestamp > self.max_duration:
59 | reward -= 10.0 # extra penalty
60 | done = True
61 |
62 |
63 | # Take one RL step, passing in current state and reward, and obtain action
64 | # Note: The reward passed in here is the result of past action(s)
65 | action = self.agent.step(state, reward, done) # note: action = vector
66 |
67 | # Convert to proper force command (a Wrench object) and return it
68 | if action is not None:
69 | action = np.clip(action.flatten(), self.action_space.low, self.action_space.high) # flatten, clamp to action space limits
70 | return Wrench(
71 | force=Vector3(action[0], action[1], action[2]),
72 | torque=Vector3(action[3], action[4], action[5])
73 | ), done
74 | else:
75 | return Wrench(), done
76 |
77 |
--------------------------------------------------------------------------------
/src/quad_controller_rl/tasks/takeoff.py:
--------------------------------------------------------------------------------
1 | """Takeoff task."""
2 |
3 | import numpy as np
4 | from gym import spaces
5 | from geometry_msgs.msg import Vector3, Point, Quaternion, Pose, Twist, Wrench
6 | from quad_controller_rl.tasks.base_task import BaseTask
7 |
8 | class Takeoff(BaseTask):
9 | """Simple task where the goal is to lift off the ground and reach a target height."""
10 |
11 | def __init__(self):
12 | # State space:
13 | cube_size = 300.0 # env is cube_size x cube_size x cube_size
14 | self.observation_space = spaces.Box(
15 | np.array([- cube_size / 2, - cube_size / 2, 0.0, -1.0, -1.0, -1.0, -1.0]),
16 | np.array([ cube_size / 2, cube_size / 2, cube_size, 1.0, 1.0, 1.0, 1.0])
17 | )
18 | #print("Takeoff(): observation_space = {}".format(self.observation_space)) # [debug]
19 |
20 | # Action space:
21 | max_force = 25.0
22 | max_torque = 25.0
23 | self.action_space = spaces.Box(
24 | np.array([-max_force, -max_force, -max_force, -max_torque, -max_torque, -max_torque]),
25 | np.array([ max_force, max_force, max_force, max_torque, max_torque, max_torque])
26 | )
27 | #print("Takeoff(): action_space = {}".format(self.action_space)) # [debug]
28 |
29 | # Task-specific parameters
30 | self.max_duration = 5.0 # secs
31 | self.target_z = 10.0 # target height (z position) to reach for successful takeoff
32 |
33 | def reset(self):
34 | # Nothing to reset; just return initial condition
35 | return Pose(
36 | position=Point(0.0, 0.0, np.random.normal(0.5, 0.1)), # lift off from a slight random height near the ground
37 | orientation=Quaternion(0.0, 0.0, 0.0, 1.0), # identity orientation (w = 1)
38 | ), Twist(
39 | linear=Vector3(0.0, 0.0, 0.0),
40 | angular=Vector3(0.0, 0.0, 0.0)
41 | )
42 |
43 | def update(self, timestamp, pose, angular_velocity, linear_acceleration):
44 | # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
45 | state = np.array([
46 | pose.position.x, pose.position.y, pose.position.z,
47 | pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]).reshape(1, -1)
48 |
49 | # Compute reward / penalty and check if this episode is complete
50 | done = False
51 | reward = -min(abs(self.target_z - pose.position.z), 20.0) # zero at target z, increasingly negative with distance, capped at -20
52 | if pose.position.z >= self.target_z: # agent has crossed the target height
53 | reward += 10.0 # bonus reward
54 | done = True
55 | elif timestamp > self.max_duration: # agent has run out of time
56 | reward -= 10.0 # extra penalty
57 | done = True
58 |
59 | # Take one RL step, passing in current state and reward, and obtain action
60 | # Note: The reward passed in here is the result of past action(s)
61 | action = self.agent.step(state, reward, done) # note: action = vector
62 |
63 | # Convert to proper force command (a Wrench object) and return it
64 | if action is not None:
65 | action = np.clip(action.flatten(), self.action_space.low, self.action_space.high) # flatten, clamp to action space limits
66 | return Wrench(
67 | force=Vector3(action[0], action[1], action[2]),
68 | torque=Vector3(action[3], action[4], action[5])
69 | ), done
70 | else:
71 | return Wrench(), done
72 |
73 |
--------------------------------------------------------------------------------
/srv/SetPose.srv:
--------------------------------------------------------------------------------
1 | geometry_msgs/Pose pose
2 | ---
3 | bool success
4 | string message
5 |
--------------------------------------------------------------------------------
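Finally, a hedged sketch of calling this service from Python with rospy; the service name '/quad_rotor/set_pose' is an assumption here, so check the launch file or node source for the name actually advertised:

    import rospy
    from geometry_msgs.msg import Pose, Point, Quaternion
    from quad_controller_rl.srv import SetPose

    rospy.init_node('set_pose_client')
    rospy.wait_for_service('/quad_rotor/set_pose')  # assumed service name
    set_pose = rospy.ServiceProxy('/quad_rotor/set_pose', SetPose)

    # Teleport the quad to the origin at 10 m altitude, identity orientation
    response = set_pose(Pose(position=Point(0.0, 0.0, 10.0),
                             orientation=Quaternion(0.0, 0.0, 0.0, 1.0)))
    print(response.success, response.message)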