├── .gitignore ├── LICENSE ├── README.md ├── examples ├── results │ ├── ddpg_on_pendulum.png │ ├── dqn_on_cartpole.png │ └── qlearning_on_cliffwalking.png ├── use_ddpg_in_pendulum.ipynb ├── use_dqn_in_cartpole.ipynb └── use_qlearning_in_cliff_walking.ipynb ├── life ├── base │ ├── __pycache__ │ │ ├── q_learning.cpython-37.pyc │ │ ├── sarsa.cpython-37.pyc │ │ └── trainer.cpython-37.pyc │ ├── q_learning.py │ ├── sarsa.py │ └── trainer.py ├── dqn │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── dqn.cpython-37.pyc │ │ ├── dqn_improved.cpython-37.pyc │ │ └── trainer.cpython-37.pyc │ ├── dqn.py │ ├── dqn_improved.py │ └── trainer.py ├── envs │ ├── __pycache__ │ │ ├── cliffwalking.cpython-37.pyc │ │ ├── con_env_demo.cpython-37.pyc │ │ └── dis_env_demo.cpython-37.pyc │ ├── cliffwalking.py │ ├── con_env_demo.py │ └── dis_env_demo.py ├── imitation │ ├── __init__.py │ ├── bc.py │ ├── gail.py │ └── trainer.py ├── policy │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── ddpg.cpython-37.pyc │ │ ├── ppo.cpython-37.pyc │ │ ├── reinforce.cpython-37.pyc │ │ ├── sac.cpython-37.pyc │ │ └── trainer.cpython-37.pyc │ ├── ac │ │ ├── a3c.py │ │ └── ac.py │ ├── ddpg.py │ ├── ppo.py │ ├── reinforce.py │ ├── sac.py │ └── trainer.py ├── test │ ├── test_dqn.py │ ├── test_off_policy.py │ ├── test_on_policy.py │ └── test_ql.py └── utils │ ├── __pycache__ │ ├── calculator.cpython-37.pyc │ └── cont2disp.cpython-37.pyc │ ├── calculator.py │ ├── cont2disp.py │ └── replay │ ├── __pycache__ │ └── replay_buffer.cpython-37.pyc │ ├── per_replay_buffer.py │ └── replay_buffer.py └── main.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 HanggeAi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Life 2 | Life is a reinforcement learning library implemented in PyTorch. 3 | ![190503058512b5a059717be2719e6a1](https://user-images.githubusercontent.com/106570281/220634585-7f9375f2-599f-479c-bceb-f624f9932528.jpg) 4 | ## Algorithms currently implemented in Life: 5 | - Sarsa 6 | - multi-Sarsa 7 | - Q-Learning 8 | - Dyna-Q 9 | - DQN 10 | - Double-DQN 11 | - Dueling-DQN 12 | - REINFORCE (policy gradient) 13 | - Actor-Critic 14 | - PPO 15 | - DDPG 16 | - SAC 17 | - BC 18 | - GAIL 19 | - CQL 20 | # Installation 21 | ```bash 22 | pip install rllife 23 | ``` 24 | ## Alternatively 25 | You can download the .gz file from [PyPI](https://pypi.org/project/rllife/#files) and install it locally. 26 | # Requirements 27 | ```bash 28 | pyyaml==6.0 29 | ipykernel==6.15.1 30 | jupyter==1.0.0 31 | matplotlib==3.5.3 32 | seaborn==0.12.1 33 | dill==0.3.5.1 34 | argparse==1.4.0 35 | pandas==1.3.5 36 | pyglet==1.5.26 37 | importlib-metadata<5.0 38 | setuptools==65.2.0 39 | gym==0.25.2 40 | numpy==1.21.6 41 | pandas==1.3.4 42 | torch==1.10.0 43 | tqdm==4.64.1 44 | ``` 45 | ## Key features 46 | - Built on PyTorch, the mainstream deep learning framework, with GPU acceleration supported. 47 | - Concise and easy to use: a reinforcement learning algorithm can be built and trained in just a few lines of code. 48 | - Broad coverage: implementations range from classic Q-Learning to several recent reinforcement learning algorithms. 49 | - All hyperparameters can be customized, as can the structure of the deep neural networks; the encapsulation is light yet the library stays concise and easy to use. 50 | ## Life's structure at a glance 51 | ![life_struct](https://user-images.githubusercontent.com/106570281/221387421-566e1444-ea61-48ed-b68e-34ee1725560f.jpg) 52 | ## Life divides reinforcement learning algorithms into the following categories: 53 | 1. Classic reinforcement learning algorithms, e.g. Sarsa; 54 | 2. Deep reinforcement learning algorithms based only on a value function, e.g. DQN; 55 | 3.
Deep reinforcement learning algorithms based on both a policy function and a value function, e.g. AC; 56 | 4. Imitation learning algorithms, e.g. BC; 57 | 5. Offline reinforcement learning algorithms, e.g. CQL. 58 | ## Each category of algorithm comes with a trainer 59 | Trainer names correspond one-to-one with algorithm names; for example, to train ```DQN```, its training function is named: 60 | ```train_dqn``` 61 | ### Taking DQN as an example, its structure is as follows 62 | ![dqn_struct](https://user-images.githubusercontent.com/106570281/221387444-67dc5dc9-4ba1-4707-9bcc-d8ae9abdb7cf.jpg) 63 | where: 64 | - dqn.py contains the classic DQN algorithm 65 | - dqn_improved.py contains several improved DQN variants 66 | - trainer contains the training functions for all of the DQN algorithms above 67 | # Get Started 68 | Reinforcement learning with Life takes only three simple steps. As a quick start, the following trains DQN on the CartPole environment: 69 | ## Step 1: import the relevant modules 70 | ```python 71 | from life.dqn.dqn import DQN  # import the model 72 | from life.dqn.trainer import train_dqn  # import the trainer 73 | from life.envs.dis_env_demo import make  # a demo environment 74 | from life.utils.replay.replay_buffer import ReplayBuffer  # replay buffer 75 | import torch 76 | import matplotlib.pyplot as plt 77 | ``` 78 | ## Step 2: set the hyperparameters and build the model 79 | ```python 80 | # hyperparameters 81 | lr = 2e-3 82 | num_episodes = 500 83 | hidden_dim = 128 84 | gamma = 0.98 85 | epsilon = 0.01 86 | target_update = 10 87 | buffer_size = 10000 88 | minimal_size = 500 89 | batch_size = 64 90 | device = torch.device("cpu")  # a GPU can also be used: torch.device("cuda") 91 | 92 | env = make()  # build the environment, here CartPole-v0 93 | replay_buffer = ReplayBuffer(buffer_size)  # replay buffer 94 | state_dim = env.observation_space.shape[0] 95 | action_dim = env.action_space.n 96 | 97 | # build the model 98 | agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, 99 |             target_update, device)  # DQN model 100 | ``` 101 | Note that, if you look closely, no neural network was passed in when the DQN was built above. This is because, for the deep reinforcement learning algorithms, Life provides a default two-layer network as the **default neural network** for DQN. Of course, you can also **use a network architecture of your own design:** 102 | ```python 103 | class YourNet: 104 |     """your network for your task""" 105 |     pass 106 | 107 | 108 | agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, 109 |             target_update, device, q_net=YourNet)  # DQN model with a custom network 110 | ``` 111 | In that case the hyperparameters state_dim, hidden_dim and action_dim, which are normally passed to the default network, are no longer used and can be set to arbitrary values. 112 | ## Step 3: train the model with the trainer 113 | ```python 114 | result = train_dqn(agent, env, replay_buffer, minimal_size, batch_size, num_episodes) 115 | ``` 116 | ## The training function returns the return of each episode of training; if you like, you can visualize it: 117 | ```python 118 | episodes_list = list(range(len(result))) 119 | plt.figure(figsize=(8,6)) 120 | plt.plot(episodes_list, result) 121 | plt.xlabel("Episodes") 122 | plt.ylabel("Returns") 123 | plt.title("DQN on {}".format("Cart Pole v1")) 124 | plt.show() 125 | ``` 126 | This gives: 127 | ![dqn_on_cartpole](https://user-images.githubusercontent.com/106570281/221387500-714d271b-51fa-43b5-9025-56dd4b5c76b7.png) 128 | ## If you also need the agent, you can set ```return_agent=True```, which returns a tuple ```(return_list, agent)``` 129 | Here ```return_list``` is the return of each episode of training, and ```agent``` is the trained agent. 130 | ```return_agent``` defaults to ```False```. 131 | 132 | **As you can see, apart from setting the hyperparameters, building the DQN algorithm took only two lines of code:** 133 | ```python 134 | from life.dqn.dqn import DQN 135 | agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, target_update, device) 136 | ``` 137 | **Training the DQN likewise took only two lines of code:** 138 | ```python 139 | from life.dqn.trainer import train_dqn 140 | result = train_dqn(agent, env, replay_buffer, minimal_size, batch_size, num_episodes) 141 | ``` 142 | ### This makes the reinforcement learning implementation remarkably concise and convenient!
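If you want to keep the trained agent as well, the call from Step 3 changes by only one argument. A minimal sketch, assuming `return_agent` is accepted as a keyword argument of `train_dqn` as described above:

```python
from life.dqn.trainer import train_dqn

# return_agent=True makes the trainer return (return_list, agent) instead of only the
# list of per-episode returns; everything else reuses the objects built in Steps 1 and 2.
return_list, trained_agent = train_dqn(agent, env, replay_buffer, minimal_size,
                                       batch_size, num_episodes, return_agent=True)
print("best episode return:", max(return_list))
```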
143 | 144 | ## The example above is included in the project's examples directory 145 | # About the name and the logo 146 | - In Chinese, "Life" means both life and living. Reinforcement learning is itself a process of life: we are doing it all the time. It is not only a scientific approach to decision making; the ideas behind its algorithms also carry plenty of life lessons and leave one much the wiser. 147 | - The logo uses a dark-blue background with the artwork and text in light blue and white, giving it a composed, rigorous, technological feel. The text consists of the project name LIFE and the motto RL IS THE PROCESS OF LIFE, which can be read either as "reinforcement learning is the process of life" or as "RL is the program of the Life library", a deliberate double meaning. 148 | - The emblem consists of four elongated letters F; extending the top stroke of the F and rotating it 90° to the left forms an L, giving LF as an abbreviation of LIFE. It is also a nod to OpenAI's logo: 149 | ![image](https://user-images.githubusercontent.com/106570281/221387550-49896c2c-dfa9-4f35-a2d6-56314e8cb44f.png) 150 | 151 | -------------------------------------------------------------------------------- /examples/results/ddpg_on_pendulum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/examples/results/ddpg_on_pendulum.png -------------------------------------------------------------------------------- /examples/results/dqn_on_cartpole.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/examples/results/dqn_on_cartpole.png -------------------------------------------------------------------------------- /examples/results/qlearning_on_cliffwalking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/examples/results/qlearning_on_cliffwalking.png -------------------------------------------------------------------------------- /examples/use_ddpg_in_pendulum.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# Step 1: import the relevant modules" 7 | ], 8 | "metadata": { 9 | "collapsed": false 10 | } 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 3, 15 | "outputs": [], 16 | "source": [ 17 | "from life.policy.ddpg import DDPG\n", 18 | "from life.policy.trainer import train_ddpg\n", 19 | "from life.envs.con_env_demo import make\n", 20 | "from life.utils.replay.replay_buffer import ReplayBuffer\n", 21 | "import torch\n", 22 | "import matplotlib.pyplot as plt" 23 | ], 24 | "metadata": { 25 | "collapsed": false 26 | } 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "source": [ 31 | "# Step 2: set the hyperparameters and build the model" 32 | ], 33 | "metadata": { 34 | "collapsed": false 35 | } 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 2, 40 | "outputs": [ 41 | { 42 | "name": "stderr", 43 | "output_type": "stream", 44 | "text": [ 45 | "D:\\anocoodaa\\envs\\zyhrl\\lib\\site-packages\\gym\\core.py:318: DeprecationWarning: \u001B[33mWARN: Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\u001B[0m\n", 46 | " \"Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\"\n", 47 | "D:\\anocoodaa\\envs\\zyhrl\\lib\\site-packages\\gym\\wrappers\\step_api_compatibility.py:40: DeprecationWarning: \u001B[33mWARN: Initializing environment in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\u001B[0m\n", 48 | " "Initializing environment in old step API which returns one bool instead of two.
It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\"\n" 49 | ] 50 | } 51 | ], 52 | "source": [ 53 | "# 设置超参数\n", 54 | "actor_lr = 3e-4\n", 55 | "critic_lr = 3e-3\n", 56 | "num_episodes = 200\n", 57 | "hidden_dim = 64\n", 58 | "gamma = 0.98\n", 59 | "tau = 0.005 # 软更新参数\n", 60 | "buffer_size = 10000\n", 61 | "minimal_size = 1000\n", 62 | "batch_size = 64\n", 63 | "sigma = 0.01 # 高斯噪声标准差\n", 64 | "device = torch.device(\"cpu\")\n", 65 | "\n", 66 | "env=make()\n", 67 | "replay_buffer = ReplayBuffer(buffer_size)\n", 68 | "state_dim = env.observation_space.shape[0]\n", 69 | "action_dim = env.action_space.shape[0]\n", 70 | "action_bound = env.action_space.high[0] # 动作最大值\n", 71 | "\n", 72 | "# 建立模型\n", 73 | "agent = DDPG(state_dim, action_dim, state_dim+action_dim,hidden_dim,\n", 74 | " False,action_bound, sigma, actor_lr, critic_lr, tau, gamma, device)" 75 | ], 76 | "metadata": { 77 | "collapsed": false 78 | } 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "source": [ 83 | "# 第三步,训练模型" 84 | ], 85 | "metadata": { 86 | "collapsed": false 87 | } 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 4, 92 | "outputs": [ 93 | { 94 | "name": "stderr", 95 | "output_type": "stream", 96 | "text": [ 97 | "Iteration 0: 0%| | 0/20 [00:00", 135 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAswAAAIjCAYAAAAXw01NAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADFLElEQVR4nOydeZgcZb39T/U++0wms2RfgbAvQULCFiASuLigglwVDSCrokJQfkExioh49QaDyhUV2QQuq3JZIhDDokDYSSBAWEP2WbLMPr3X74/u9623qqvX6Z6p6ZzP88yTTE91d3VP9/SpU+c9X03XdR2EEEIIIYQQW1wjvQOEEEIIIYQ4GQpmQgghhBBCMkDBTAghhBBCSAYomAkhhBBCCMkABTMhhBBCCCEZoGAmhBBCCCEkAxTMhBBCCCGEZICCmRBCCCGEkAxQMBNCCCGEEJIBCmZCCCEjzm233QZN0/DJJ5/kfd1nnnkGmqbhmWeeKfp+EUIIQMFMCBmlCIElvgKBAMaPH4+FCxfit7/9LXp7e1Ou89Of/tR0ncrKSkyePBmf/exnceuttyIUCqVc5+yzzzZdp7a2FgcffDCWLVtmu/2bb76Jc845B9OmTUMgEEB1dTUOOeQQXHHFFfj4449L8lwUgt1zsd9+++Gqq65CT0/PSO/eHsPLL7+Mb33rW5g9eza8Xi80TRvpXSKE2OAZ6R0ghJCh8LOf/QzTpk1DJBJBW1sbnnnmGVx66aW4/vrr8fDDD+Oggw5Kuc4f/vAHVFdXIxQKYevWrXjiiSdw7rnnYvny5Xj00UcxadIk0/Z+vx8333wzAKCrqwsPPvggvv/97+OVV17BPffcI7f785//jIsvvhhjx47F1772NcyaNQvRaBTr1q3DHXfcgeXLl2NwcBBut7u0T0oeiOeir68PTz75JK699lo89dRTeP755ynehoEVK1bg5ptvxkEHHYTp06fj/fffH+ldIoTYoRNCyCjk1ltv1QHor7zySsrPVq1apVdUVOhTpkzRBwYG5OU/+clPdAB6Z2dnynXuvPNO3eVy6XPmzDFdvmjRIr2qqsp0WSwW0w8//HAdgL5161Zd13X9+eef191ut37sscfqPT09Kbc/ODioX3XVVXo0Gi3o8RabdM/FF7/4RR2A/sILLwzr/ojf54YNG/K+7tNPP60D0J9++umi71epaWtrk6/Rb3/72zo/lglxJoxkEELKjhNOOAE//vGPsXHjRtx55505XedrX/sazjvvPLz00ktYuXJlxm1dLhfmz58PADJze/XVV0PTNNx1112oqalJuU4gEMA111yTk7v8xhtv4JRTTkFtbS2qq6tx4okn4sUXXzRtIyIpzz//PBYvXoympiZUVVXhC1/4Ajo7O3N6zHaccMIJAIANGzYAAOLxOJYvX479998fgUAALS0tuPDCC7F7927T9aZOnYrPfOYzeO6553DEEUcgEAhg+vTpuOOOO1Lu4+2338YJJ5yAiooKTJw4ET//+c8Rj8dTttM0DT/96U9TLp86dSrOPvvsjI8j3Tbz58+XvzvAyD/fd999uPrqqzFhwgTU1NTg9NNPR3d3N0KhEC699FI0Nzejuroa55xzjm0UR+WSSy5BdXU1BgYGUn72la98Ba2trYjFYgCAlpYWVFRUZLw9QsjIQ8FMCClLvv71rwMAnnzyyZJc56OPPgIANDY2YmBgAE899RTmz5+PiRMnFrC3Bm+//TaOOeYYrF27FldccQV+/OMfY8OGDZg/fz5eeumllO2/853vYO3atfjJT36Ciy++GI888gguueSSgu9ffVwAcOGFF+IHP/gBjjrqKNxwww0455xzcNddd2HhwoWIRCKm63744Yc4/fTT8elPfxrLli1DQ0MDzj77bLz99ttym7a2Nhx//PFYs2YNlixZgksvvRR33HEHbrjhhoL3uRhcd911eOKJJ7BkyRKce+65+Nvf/oaLLroI5557Lt5//3389Kc/xRe/+EXcdttt+K//+q+Mt3XmmWeiv78fjz32mOnygYEBPPLIIzj99NMdFcshhGSHGWZCSFkyceJE1NXVSQGYCwcccAAA2F5nx44dAIDu7m7cd999eOihh3DQQQdhn332wZtvvoloNCqvr7Jr1y6Te1pbWwufz5d2H6666ipEIhE899xzmD59OgDgG9/4BvbZZx9cccUVePbZZ03bNzY24sknn5R543g8j
t/+9rfo7u5GXV1d1se8a9cuAJAZ5v/5n/9BS0sLjjnmGDz33HO4+eabcdddd+GrX/2qvM7xxx+Pk08+Gffff7/p8vfeew//+te/cMwxxwAAvvzlL2PSpEm49dZb8d///d8AgP/6r/9CZ2cnXnrpJRxxxBEAgEWLFmGvvfbKuq+lJBqN4tlnn4XX6wUAdHZ24p577sHJJ5+MFStWAAC+9a1v4cMPP8Qtt9yCpUuXpr2to48+GhMmTMC9996LM844Q17+2GOPob+/H2eeeWZpHwwhpOjQYSaElC3V1dW2bRmZtgeQcp3+/n40NTWhqakJM2fOxA9/+EPMnTsXf//73wFAtkqI66tMnz5dXrepqQkPP/xw2vuPxWJ48skncdppp0mxDADjxo3DV7/6VTz33HMpDRYXXHCBaXHeMcccg1gsho0bN+b0mPfZZx80NTVh2rRpuPDCCzFz5kw89thjqKysxP3334+6ujp8+tOfxo4dO+TX7NmzUV1djaefftp0W/vtt58UywDQ1NSEffbZx9QOsmLFChx55JFSLIvtvva1r+W0v6XiG9/4hhTLADBnzhzouo5zzz3XtN2cOXOwefNmRKPRtLelaRrOOOMMrFixAn19ffLye++9FxMmTMDRRx9d/AdACCkpdJgJIWVLX18fmpub89oeQEoGORAI4JFHHgGQaMyYNm2aKXohtlfFkeD//u//EIlEsHbtWnz/+9/PeP+dnZ0YGBjAPvvsk/KzfffdF/F4HJs3b8b+++8vL588ebJpu4aGBgBIyRin48EHH0RtbS28Xi8mTpyIGTNmyJ998MEH6O7uTvscdnR0mL637ovYH3VfNm7ciDlz5qRsZ/eYhxPrvgt33tqYUldXh3g8ju7ubhlbsePMM8/E8uXL8fDDD+OrX/0q+vr6sGLFClx44YVsHyFkFELBTAgpS7Zs2YLu7m7MnDkz5+usW7cOAFKu43a7sWDBgrTXmzlzJjwej7y+ynHHHQcA8HhK8+c2XRZW1/Wcrn/sscdi7Nixtj+Lx+Nobm7GXXfdZfvzpqamou5LrogFc5lIJ0pjsZjtfqbb90If05FHHompU6fivvvuw1e/+lU88sgjGBwcZByDkFEKBTMhpCz561//CgBYuHBhSa8DAFVVVZg/fz6effZZbN26FRMmTMjr+oKmpiZUVlbivffeS/nZ+vXr4XK5UhzPUjJjxgz885//xFFHHVW0JocpU6bggw8+SLnc7jE3NDSgq6vLdFk4HMb27duz3o/ddYGEw63GXUrJl7/8Zdxwww3o6enBvffei6lTp+LII48clvsmhBQXZpgJIWXHU089hWuuuQbTpk3LORt799134+abb8bcuXNx4okn5n2fS5cuRSwWw1lnnWUbzcjFZXW73TjppJPwf//3f6YR0e3t7bj77rtx9NFHo7a2Nu99K5Qvf/nLiMViuOaaa1J+Fo1GbQVpNv7jP/4DL774Il5++WV5WWdnp62LPWPGDPzrX/8yXfanP/0pJ4d5xowZePHFFxEOh+Vljz76KDZv3pz3PhfKmWeeiVAohNtvvx2PP/44vvzlLw/bfRNCigsdZkLIqOYf//gH1q9fj2g0ivb2djz11FNYuXIlpkyZgocffhiBQCDlOg888ACqq6sRDoflpL/nn38eBx98MO6///6C9uOYY47B73//e3znO9/BXnvtJSf9hcNhvP/++7jrrrvg8/nQ2tqa8XZ+/vOfY+XKlTj66KPxrW99Cx6PB3/84x8RCoXwq1/9qqB9K5TjjjsOF154Ia677jqsWbMGJ510ErxeLz744APcf//9uOGGG3D66afndZtXXHEF/vrXv+Lkk0/G9773PVRVVeFPf/oTpkyZgjfffNO07XnnnYeLLroIX/rSl/DpT38aa9euxRNPPJE2QmK97gMPPICTTz4ZX/7yl/HRRx/hzjvvNGW0S81hhx2GmTNn4kc/+hFCoZBtHGPjxo3yzMarr74KIPEaABJuvKg6JISMLBTMhJBRjaj38vl8GDNmDA488EAsX74c55xzju0AEQC4+OKLASQW840dOxaHHHIIbrnlFnz1q1+F3+8veF8uvvhizJ07F7/5zW9w//33o62tDV6vFzNmzMCiRYtw8cUXZxVs+++/P/7973/jyiuvxHXXXYd4PI45c+bgzjvvtF0sV2puuukmzJ49G3/84x/xwx/+EB6PB1OnTsVZZ52Fo446Ku/bGzduHJ5++ml85zvfwS9/+Us0Njbioosuwvjx4/HNb37TtO3555+PDRs24C9/+Qsef/xxHHPMMVi5cmVOZwAWLlyIZcuW4frrr8ell16Kww8/HI8++iguv/zyvPd5KJx55pm49tprMXPmTBx22GEpP9+wYQN+/OMfmy4T3x933HEUzIQ4BE0v9moMQgghhBBCyghmmAkhhBBCCMkABTMhhBBCCCEZoGAmhBBCCCEkAxTMhBBCCCGEZICCmRBCCCGEkAxQMBNCCCGEEJIB9jCXgHg8jm3btqGmpgaapo307hBCCCGEEAu6rqO3txfjx4+Hy5XZQ6ZgLgHbtm3DpEmTRno3CCGEEEJIFjZv3oyJEydm3IaCuQSI6WKbN29GbW3tCO8NIYQQQgix0tPTg0mTJqWdCqtCwVwCRAyjtraWgpkQQgghxMHkEp/loj9CCCGEEEIyQMFMCCGEEEJIBiiYCSGEEEIIyQAFMyGEEEIIIRmgYCaEEEIIISQDFMyEEEIIIYRkgIKZEEIIIYSQDFAwE0IIIYQQkgEKZkIIIYQQQjJAwUwIIYQQQkgGKJgJIYQQQgjJAAUzIYQQQgghGaBgJoQQQgghJAMUzIQQQgghhGSAgjkNN954I6ZOnYpAIIA5c+bg5ZdfHuldIoQQQgghIwAFsw333nsvFi9ejJ/85Cd4/fXXcfDBB2PhwoXo6OgY6V0jhBBCCCHDjKbruj7SO+E05syZg0996lP4/e9/DwCIx+OYNGkSvvOd72DJkiUp24dCIYRCIfl9T08PJk2ahO7ubtTW1g7bfpPMrG/rwdhqP8ZW+/O6nniLaJpWit2ShKIx+D3ukt5HPui6jp7BKOoqvSO9KyWjayCMDTv6ASR+v/uOq3HU74AQQkjp6OnpQV1dXU56jQ6zhXA4jNdeew0LFiyQl7lcLixYsACrV6+2vc51112Huro6+TVp0qTh2t09Al3X8ct/rMcja7cVfBvbugZx6m+fw7m3vZL3fX/5j6vxhf95AfF46Y4tH1m7DfsvfQL/t2Zrye4jHZt3DeAzv/s3HnrDfN+/WPEuDr3mSazd3GW6/IoH1uKHf39rGPewNAQjMSy4/ll84X9ewBf+5wWcduPzuPCvrxV8e10DYZy47Bn81+Pri7iX+dM9EMHJy/+F5f98f0T3gxBCygkKZgs7duxALBZDS0uL6fKWlha0tbXZXufKK69Ed3e3/Nq8efNw7Ooewwcdfbjp2Y/wixXvFnwbH3b0IRbX8UnSTcyV3lAUr3yyG2s2d6G9N1jw/WfjtY27EY3rWP3RzpLdRzr+/cEOrNvagwdf32K6fO2WbsR1
4O1tPfKyroEw7nt1C+5+aRO6BsLDvatFZWvXIHb0heHSgHF1AQDAm1u6C7691zbuxked/SkHHsPNG5t3Y31bLx54bUv2jYeJzbsG8I1bXsbzH+7IafuOniC6ByIl3itCCMkdCuYi4Pf7UVtba/oixWMgHAMA9IeiBd9GR29I3lY+KST1Ptt7Qhm2HBriftp7SifK0xGM2D+/4nv18t6g8f+2EdjXYiKe66ljq/DYd48BAOzqDyMcjRd0e+L56OgNIVbCsxHZ6A/F5H44JXH3+Lo2/Ov9Tvzvy5uybjsQjuLEZc/itP95fhj2jBBCcoOC2cLYsWPhdrvR3t5uury9vR2tra0jtFd7NpFYQsCEChQyANCZFMzRuJ7X7ahisa27dAKxP5y4HyHshxPxfAihJfcp+dh7leegb5iej+FACObW2gAaKr3wuhMZ9c6+wn4H4oAqFtexs8DbKAbi9xaOxtE96AyXtjeY2I++HA56d/SG0RuKYsOOfscIfkIIoWC24PP5MHv2bKxatUpeFo/HsWrVKsydO3cE92zPRRXMhX6Adihxinyc6j5FRHaUMJIhxGopXex0hKKJ+7aKGfHY1efL7LiPbsHc1p14rltqA9A0Dc01iVhGoY+rQ7neSPweBeoBzkgcgNkhXkt9wezvvWDUeM+FY4UfJBNCSDGhYLZh8eLF+POf/4zbb78d7777Li6++GL09/fjnHPOGeld2yOJxAyRXOgHqCocrE5qJobNYU7ez87+kDxAGC6CkaTDHM4hkqH8f3uZOMwttQmh3FybaE/pKFDsqhGVkYyrOPGgRuxTLg6ziAgBKDgeQwghxcYz0jvgRM4880x0dnZi6dKlaGtrwyGHHILHH388ZSEgGR6iioAMReMF1X51KoI5lw9tu21LmmFO5rR1HdjRF8K4uoqS3ZcV4TCrQisW1zGYFC69ZeowG5GMhFBurkkK5gLPJKivj5F8btTfUaHiv9j0JQ/GrAdldqiRKQpmQohToGBOwyWXXIJLLrlkpHeDACbHNRSJA4H8b0MVzAM5fGgLhksgWkXOcApm4TBHYrrsglaFjbpv6in10Z5hbrM4zOLfQkVmuymSMXLPTZ8DIxnG2YrsZ3dUh3ko6xYIIaSYMJJBHI8ayQhFc49TqBTqMA+XYFZF/HCLLfU57c+SWzYt+nOIe1koQhi3JCvlhMNcyPMfisawq9+o2RvJgwknngUQB1q5RTLoMBNCnAcFM3E8qsOsfpjmykA4avqgzifDrC76K2Uu1RT9GGZXMKQ8p1LYKE6yWiWnPndOEWOFEI/rppYMAGhO/lvI8291pYf7d6iivmY7HeIwi9d3OBrPmtE3ZZi56I8Q4hAomInjiQ7RYbaKhnxaMqwdxPnEOXIlFtdNBwIdI+gw99kszlLjGX0ho6ZsV3/YJG5GEzv7w4jGdWga0JR0lo1IRv7PvzX33E6H2US6iI8dagwjVMABMiGElAIKZuJ4wpZFf/lizXEWuugPKM3CP+tCqOGPZBjPqdgX1UlW3eY+izvvlEVl+SKe48YqP7zuxJ9BY9Ff/o9JVNTVVXgT3zPDbML0esry/jM7zKPzgIwQUn5QMBPHE7Uu+ssTq8Nc6KI/oDRi1nofwy1yVIFi6zBnEDujddqfjGPU+eVlwmEuZNqfuL2DJtYBALoHIyPmvlsdZicM/zAfdOUumLnojxDiFCiYieMZ6qI/6yl2q0uaieFwf62Z6uEeemFymG26l8OxuG31HDB6BbNsyKgxKleGMu1PvC5mNlcj4HWZLhtuVEEaisbRk8OwkFISjsZNZ4nyimRQMBNCHAIFM3E8kXhxIxmFTPpzJXTU8DjMwyy0VEdPCuaUISbmSW0+T1IUjtJquXZLQwaAIU37UxcQikWEI9WUMdKvJyvW/cl2wBri4BJCiAOhYCaOJxJVHebCIxm1gUTteCGL/iaNqQRgZFWLiRCnY6t9ABIL0oZTKKjPqRxhHLIKZnNUY/rYKgCj12EWQl+IW4Ex7S9fwZx4XbTWBYbUtjFU4nFdDsFpqEzkqUc6x5zutZSOIB1mQogDoWAmjsc8uKSASEZSMExrqgZQWA/zjOR12wucApf5PhKPaUJ9hYwE7MgzEjAUTBnmYGokAzCq5cRzN6M58XyMVsFsDC3xmy4XEY18RaZwmJtrDId5JNz3AeV3OT35mi10cmGxsL7f8lr0R8FMCHEIFMzE8Qw1kiEc5mmNCZc4l/G8AqujWgoRJBYhVgc8BUcChkK2lgzz5eYDiNE67a9dCmZ7h7ngSEZdAK11w/87FIgDHrdLw+TkWZHhzsRbsR58Zc0wc3AJIcSBUDATxzPUSIZwC6cmRW8+g0v6LY5qKRxmIcorfR5FsA2fyDENLknuS2/Q3hUU/85sLg/B3FpnFsxCQOfz/PcGIzIG0VLrl/V0I+G+i99Plc+txEtGWyRDbclgrRwhxBl4RnoHCMlG1OQw5/cBGovr2NWfdJilYM4nkpG4P+kw94Sg6zo0TctrPzIxkLyPar8H7uTtDtdpdF3XTQLFriUDSDiXkVhcHrDMVE73x+M6XK7iPR+lJhiJYfdAYgCL2pIBFNbFLMR1TcCDSp9nRB1m8Xur9itnK0Y4kmE9QO1lJIMQMgqhw0wcT2QIPcw7+0KI64mWC7FwL1fBrNZhTWuqkpd1DUQyXS1vDIfZLTO1wyW2IjEdak1v+paMqOl5mza2CpqWuP6ugfCw7GuxEBEdn8eF+uTCOEFzAdP+rPGOQlzqYiF+R1V+j3wtdY62SEaUkQxCiPOgYCaOx9zDnN8HqHAKG6v9qA0kxFGui/7UD/YxlT6MqUq0WBTbsZMZZr9HEWzDI3KCFsfeGr2o8Lrl9yKm4fe4UOFzo7EqGT0YZbGMNqUCznqmoJADlnbLAkJZKzcCQ0P6FMHsFIfZ6ihni0RxcAkhxIlQMBPHY3KY84xkCDexucaPan+yVi4cy0nICPHh97jgcbuMbGqRBaKockuInKRgG6YqMKtjL8SMOFgQ8YK+UFS6zjXJej4xJW/UCeZu+4YMwIho7B6I5Pxaa7M4zCI7XIqzEdnoVw6+WpQM80hO+xOvJZHayd6SoTjMMQpmQogzoGAmjic6JIc5IWaaavyo9Cfc0lhcz+l2VPEBGOKx2O5vvymSkX8kYChYxzf3yQxz4nIh4PtDUdnAUCWeD8VJHU2ka8gAgPpKL3zuxJ9F60j1dIjXg7g9v8ctO5CH290VvyM1wzwYieVVpVhsxOt7bLXf9H06mGEmhDgRCmbieMJDyDCrDnOVz1jjmkuOWc2DAob7WGyBOGByBYd3wZj1wMEayVAdZnGZ9QBipEZAF4o6lc+KpmloqsmvqaTNZghKywhN+1PPVlT43KhJ/q5GslrO+lrK9t4LRws/o0QIIaWCgpk4nugQIhkdUjAH4HZpMpObS7WcKj4AY4xysQWiXPSnnEbPJxIwFKz30R+KQtd1KWqE8OsLxeRzluIwj7ZIhsURtiIXy+XoDgsXWY14jNTBhNGSkXidy2q5Ecwx96W8lnJ3mJlhJoQ4BdbKEcejLvo
LFugwC9ewyu/J+RS1VXyUqsFiICxq5dyoq/DC53EhHI1je1cQz3+0A5t2DuA/DhyHgyfVF3wfiTxtGLsHIpjYUCFFr3g+q3xu9IdjGAjHEIzEEY0nnnNTJCOUyOMK11IIoNc27cal97yB9W29uOSEmfjMQeML3k+VwXAMuwfC6AlG0DMYRc9gBD3BCGoCXhy3dxN8ntyP99t7gnj2vU401/qxcWd/Yv/r7AWzMTymsEgGYJyNyHQbu/rD+KC9F4ORGIKRGPYfXyebXHLlvlc348OOPlx5yixommZa9Ccey0ed/SPaxWwcfInXUpZFf2zJIIQ4EApm4niGsuivQ4lkAAlRuqPPftqfruvY1R9GoyVraY1kFPv0tjq4RNM0NNf4sWX3IM7442op+P/4r49x4IQ6nHXkZHz24PGo9KV/63b0BrFuazfWbe3BW1u78fbWbmxTXOAJ9RX41xXHw+3S5PPZWO1H/64BeX2BdAWD0RTHXbioH3f24+POhAi9+6VNOQnmP/3rI6zb2oMpjZWY0liFE2Y1yxYSAFjx1nZ893/fkMLdSnONH1+dMxmHTm7Atq5BdPSEcMKsZhw4sc52+6sfeRsr3mozXWYXyUg85vSubCyuo6M3iM27BtHWE8TOvpBtJlqI8afWd2BGUzUOmliHQPLsxgftvbjr5U148u0208Hg2Go/Xv7hiaZO6939YVT43PK6Vq559B30BqP44mETMKu1NvU16ySHuSY3hzlEh5kQ4kAomInjMQvmoTnMQmjafWj/8h/r8cd/fYx7LzgSc6Y3pogPIRCtGeaOniDufnkT/vNTk1MmxwmCkRjWbu7CW1u78UF7H07ctxkn7d8KwDy4BEgIry27B9HZG0JDpRdHTm/Eqnc78NbWbvy/B9/Czx97F186bCLOO2YaJjYYjuSrn+zC9+5Zg61dg7b74NKAuA5s7RpEXzCKukqvzITXVnjgcWmIxnV5QFDhdaO2wqjisy76O3zKGMyb0YhQNI5xdQE8+ub2nAZ+7OoP4xcr1psuO3xKAx64eJ78/rkPdyAa1+Fxaaiv9KI24EVNhRe1AQ/Wt/WiozeE5f/8wHQb/1i3HY9feqztfW7tMnLGuwfCaKkNYL/xtbbbimq/W577BI++uR0VXnfirEQwip5gxCRyBX6PS77GAGMS4prNXfj23a+nfS4mjalAbcCLd7b3YEdfCDv7w/J2Vn+0E1/584vwuDTs3VKD2VMa8L0Fe8nFc73BiKz6a+sOJgSzZaGqeCyrP9qJCfWVaK714/ApDSl1etu7B/HqJ7uxdnMXDp3cgFMPGpd2n7Px+LrtqA14MW/mWACGoywOIvrD0YzDf+gwE0KcCAUzcTyqy5iPYNZ1XTpr4jS7EBIDNqeFX9u4G0BC5MyZ3ijHHVcnRbYQMrv6w4jFdbiTTuDtqz/BjU9/hHA0jitOnpVyu/G4jv/47b+lCwsA//6gUwpmtSUDAI6a0Yg1m7vwtTmTsfjTe6O+0oedfSE88NoW3PXSJmzaNYDbXvgEr3yyC4999xh5m4+s3YatXYNwacCMpmocMKEO+4+vxYET6rBXSw3qK7yY9ePHEY7F0RdOCuakwxzwuFHl96B7MCIPCKr8HhlHsauVq/C5cff5RwJIuKaPvrk9p3YPkXmu8rlx/KxmPPrmdmxKutsCIc6XnDIL5x0z3fSzcDSOf6zbjv99eRN29YcxpsqHFz/ehY07B9IKMfEcX3/mwThyWiM0DWkF26emjoHbpWEwEsPGnQMpP/e4NIyvr8C4ugDGVvsxpsqH42c1wes2IiKfOXAcfG4NL3y0E69+shvr23oQ1wFNS0RaPnvweHzliMk4YELCET/i2n+iozeEtu6gfJ29tGEngMTr/53tPXhnew/GVvvxvQV7ATBHg8SBSp/l4GtcUqQ+/V4nnn6vEwDwqy8dhC9/apK87mX3rsHf39gqv/d7NuKUA1oLmt64sy+Eb931Oip9Hrz5k5PgcmkpGWZdT8SQxIGXSiQWR0x5v1MwE0KcAgUzcTymVfOR3CMZ/ck8LgCMrUmc7q/yi0V/qQ6zWLwlHFZrHnRMlQ+aljgtv3sgLJ2+rbsTju7uNBPvekNRKZaP3bsJ/3q/E+29IcTiOlxaan3d4pP2wbeOn2k6Dd9Y7ceFx83A+cdMx8Nrt+HSe9dgs0Vkdg0mMsZLTpmFC46dYbsvlX43wgNxDCQfm3h+/F4XqpOCWYjemoBHPvZ+ZXBJlU0cRByQ9ASjCEZiaSMEgBEPmDSmEj9YuA8efXN72mlwdqLK53Hh84dMwOcPmQAgkXXed+njGIzE0BuKygE1drdX7fdkFYJHTBuDV360AB29QfQFoxiMxFDpSxxQ1FV45QLSTLhcGk4+YBxOPiA3p7a1LpAQzD1BHIiEiBYHFmfPm4pgJIZ7XtlsOrDYrsRsxJmUvmDiNSCet88fMgHvt/eirTuI9W292N4dxCc7jQM3AHjy7URUZe+Warzf3odQNI7BiL2gzcbWrkHE9cR7Z1fyPSLeR41VPnmWoz8Utb196wFxughWMBLD29u6ccikhqy/C0IIKQZsySCOJ1qg47Szz4gWiCiG+JC2RjJ0XZcLo4Rwti7687pdGFOZEN5qR6/V3bMiVv27NOCWRYdL0b2rP4xgJA7x8CoVAZFOcLpcGo7eK3GquycYNblxYkhGQ6XP9rqAIXaFe252mBP32S4dZrcU8X3KaOzqQKrQqa3wyEV42RaYyVx5bcAQ5OEY4spjsVbYZaLC50Ztcp/SOdzWg59sjKnyYVZrLQ6fOgbH7NWE2VPGYFZrLcbVVZREoLXYdFqL3Pl+42sxZ/qYxGVK3EZtJxGPu9/iMI+p8uG6Lx6EW885AqcdmjzAUA46dV2X3//1m3MgTHe7jH8uqPl+sX/idVMb8MrXX7ocs7UXPN3gkt899QG+9IfVePD1LQXtJyGE5AsFM3E8hWaYd/YnHF91MVm14piq9AxG5W0L8WEnssTpcjvBPJBFBFR43fC4XWhM7k9Hb9AkTCozuLIqdRWGg9ozaEySEw5zfSbBnBTFYl/FY/Z7XfJxCtFT5fPI5ysUjUsHXRxAqGialvMCM7vpi4BZpFld92wY/dWpYl2tycv19oYbsQCxXRHBbd0JcTy+rgLj6yoAANu6DcGsRjJSz4qk/o5EpaJpMEjMOGCr8LmNA6ocahftMMdEgojFddkCU+V3y4OtdLefIpjTvN/fa+s1/UsIIaWGgpk4HvOkv9w/yHf1JQReY7UhIOWiP4uDpk5kEwLYLhZgK5iFu5fGlRMOXkUyo9yUjC909oaM+/C5c86Met0uWe2mxkC6k/+vr0yNJAisix6FQAl4DDdZiJ5qv8f02IVzXO23v30Ry8i28E88X801/sTY8eTjVl1H6wLDbGQa+KK6+IXEDIYDuwWl28VCxboAxtdXyMuEE69GMsRBSqYDDSGYB8PGeygYjpt+nimylAvq776tO2R6T1Qpr6f0DrM1kmEvmMX95LLIlBBCigEFM3E8hU7622XrMAuH1Sy8zW5dMOlKmk9vA0BTMr
e8Ixn3CEZi6Amax0lbEQJFxCyE6O7oDZmGluRDXVIUd9k5zBXpBbN0mEUkQ8kwC3dRiJAqvwdetwv+ZNSiTYlq2CGq+7It/FOr/jRNU1xHRTDbPPeZEAM67KYwquIsVxd/uLEK/t5gBL3J/R5XF0BrXQCalngviDMndov+MmW/xQGbGskYiCS297g0eN2urII2Gx2W95HYH49Lg9+T/fatB8TpHGZxwDpcI+QJIYSCmTie6BAjGY1VRt1XVZpIhnoqPxiJoycYtY1kjLU4zKrTnNVhToq1ZuU2jKEl+Qlm4SJ3J3PL8biO7qRgrsvBYRb7GkwKFL/HOF0uWyyS+yT2Tdx+jU2GGVBEXzaHWckwA0auWiwqBJB3hELct11+Wowez8fFH25Em4V47sW/tcmFl163S75uRI7Z7DCHEIzEZOWdXc5cvP4GFIdZHMwJMS1+FwMFZpg7TFGloCn3rmmaPGBN52Dn4jDH47rt+48QQkoJBTNxPGrvbTgWNy0Oy8Su/sSHqRrJSOdwWXO3HYo7pjqqwmHu7Es9JZxeBFgjGYZgNoaW5Od8ioV9IpLRG4xCTz4tdRkcZmutXsjSkgEYAl+IG6v4ShdrkM551kV/RiQDMAS4cOijsbjch3RutpXWDJGMQl384aTFMmZciOFxyewyABnLEIJZfazhaNwkoO2aTMTrT80JWw/mqmSN4NAzzG3dQWPYjVh0m2XRn2jB8SUr+uwc5t0DYbkQmJEMQshwQcFMHE/EslI+3cp5Kzv7Miz6szhoVpHX3hOydTmtGeZORWjbdTsDwGAyJxqwOMwdvUF5nXyztUIUi2aMrsHEY630ueH3pBeZQpjLDLNNS4ZA7JNVfKVzfdXHlQ61jURkno2DmMRj6VccUDun1I5MY8vtojVOQ2SYe5NtJNuTi/vG1RuDcIRg3to1iHA0jh3J17fIgG/Y0QcgIX7tmjxkhlkRzNaDuXSLYnNFPVNj9x7KdvvCURYDc+wEsyqS+0LRgt1wQgjJBwpm4nisgjnXHLNdS4YhGNNnmMX3ubRkWCMZup7qfguBYghm+0V/+VBvyTAL4Zwpv6w+FiEyTBlmv70wtgrNtII5QyxC0BM02khE7tiorks8T+I58bq1jOLf7r7tWjLszhQ4jWq/0UjS1hNUHGZDME8QC/+6g/L16vO4MKUxMe1RdH2nO/iSGWblgETEMwyHuXDBHI3FsbNfFcxBo7vbn9vtCwEvagLtzihZXeVsZzQIIaQYUDATRxOL67AmMHJtytglM8zZa+WEAKlRREu/Tb5YCmabSEZcT81gAmqtnMt0Gx29RotAvg5zfUXiMYlmjC6ZX05fKQcYBwxGD3NSMCstGQKZYc4xkpFLrZxw5GsCHnkAIQVzcuhGpoVr6ZAZ5t5gisCSBz42MQUnIV3y7qBsyDBFMpLieVvXoHy9ttYG5GP/eEdCMNvV/gH2LRnWDHPlEGrldvSFoR4v7uwPo0tUESaHyVRZDo6siDMeNcqBn/WMkjW3zFgGIWQ4oGAmjsbqLgO5L/yza8mQDmuaRX/7T6gFAGzeNSCHgpgc5mSGuWsgglA0luJu2S38C6ZZ9NfRozrMhS362y0iGaJSLovDXG15/LJWTmnJsG6rPn6/x2UaAa0inPPdA5G07QZGHENdiGkW8b0FPCfi9iIxPWXiotM7mAVqtdx2IYgVh3mckmFuUwSzeOwbkg5zuhiLXUuGNcMsF+UVEHMQB0ottX543cmYyE6ziM910V9dBsGcst4gS+83IYQUAwpm4mhUwSzc0VwcZl3X5elhMcIasF/0p+vGqvuDJtYDME5vA+YqsroKrxQDO/vC0mkW2OWYrS6ecJgHIzFThVs+iOEkwlnulkNLMgtmo4c51WFOH8lwp1xmR0Ol8dxYnxeBUSlnCEHR69wr6/kS/6Zr47DD63ZhbHJxpzWWke+Uv5GitTYhiNt6gtieXNhnF8nY2hWUiwNb6lSHOZFhTnegYZdhHkwTySikVk487wkRn9injzr6TfskzyakEeRi0V+N8ruyRrCsB6mMZBBChgMKZuJo1KEl4sPcLvZgZSAck9vZL/qLybxx10BEulj7j084zIb4MFeRuVyaFOA7+kIp7pad0LBmmKv8HplZ3rBD5E7zzDBXiFq5sHwMQHbBbPQwpzrMaSMZyuWZFuFpmiaFkt3iO0BpyKg1DmKsrmMhkQwAae+7v8CFlcNNa12yS7rbEMR2LRk7+kLYtGsg+fOAPABrl4NlMjvMwYiRC7YO1RnKoj/xvDfVBKQz/nFn8n1kOVvRF0zjMCuTJ8Wo9XSRDNGkwUgGIWQ4oGAmjkY4zC4tP4dZxDH8Hpepsq0yKc5icV26q2LKX2OVDxMbEguoRAOBnchSF/5Z3S27FfvW096AsUjNEMxDjWQkM8wV2TLMxgEDYMkwp2SVza4jkD0mka1azi6SIe5XHGz0FShw0zVlGNPvnLvoDzCq8T7s6DMNLRE0VHoRSObg12zuApDIbovXkiDtoj/l9Sd+79bX5lAyzEK4ttT65e9CCPtcWzKCysGlP021nDjo2qe1JvE9h5cQQoYBCmbiaCJJJ0ydOJdLS8ZOZcGfphkOsSr4hEATzlxTjfFBL7Bz60SOua0nKCf+CdfZzmEOWhxmcV9AoiIssV/5tmQkIxly0V/i34ZcHWabDLNVUNq1ZGSreTOGsqRzmFMjGdYYgFj8V5O3YLZvyijUsR5uxP6/uaUbgDG0RKBpGsYnHed3tvUAMGeYBekep/r6k2cYrINLhjAaWwjXFiWSIfqSxesm+6S/ZAWjxy0dZusBsngNHZBcb0CHmRAyHFAwE0cTSX6AJgSzcJizC2YxtGRMtdlxdbs0Y+JZ0kVrVz7om3IQH0Icv9/Wi7gOaBoweUxCyKhT1AQiGlJhI5hFq0ChDnNPMIpYXJcT/7JGMnxmhy+cIcNsG8nIsp/pRKvAPpJhPk0v3O98YyrGpEFrJGN0tGSIGEOfdJcrUrYRsQwhRFvrUgVzOifd7dKkCBXOsrVWLl1PeS6I91Fzjd+0WBFIjWSku331AE4cIFsdZhHJ2G98HQAu+iOEDA8UzMTRRONCMGuGw5xDJMMYWuJP+ZnV5TKcMT/8HrelVSNVfAix+3bS5Wus8slBC3bOnFhYFVBc5FxdwXSoLQLdgxGjVi5LJMPaSCEdvQw9zFV5COZsw0uEG6gemFhFWqGL9Izx2NZc+SjJMFuiFerQEsF4y2WtdamRDLGI0o5Ky7Q/a4Z5KIv+jEhGwOZMjTUjnaZWLmIcwPlsBHNiUEniugeMp8NMCBk+KJiJowlHE06ax+2C3ysEcy4Oc2oHs8BanaV+0AOWfG2GDPO723uS3wdSnFsV2wxzjcWBy9P99LpdMrLQNWD03ebakmFd9Of3uE374HYZByhqDCOb6GyWXcz2IqazJzWSYXWYxb/5RzKMqIzKaBhcAgCN1X45tQ8w55cFwmEGEmc2mmv8qFYWkQKZH6fRxWyfYRavgXRTKzNhjjbZv76N0dvpIhmqw5x6RkkcDFX53JjaWAXAqHgkhJBSQ
sFMHI1wmH1qJCPPDLMVo1pNZJhFTCDxIa9+2Gda9Cdc2uYaf4pzq2InmFOjH/mLuTpl2l+utXLi8URiOsLRuLLoz2WKq1T53DL7rR40ZKt6yzTtbzAck4vZ1EiG1dUsNHOcNsMsF/0522F2uzTTwZqomVMZr8Q0xlb7ZSe26jJnepwyjmQ5YBpqhlmd8teiDFOR+xQwx3vC0bhtV7d4bwe89g6zzMDXBlBf6ZVNGdZhJoQQUmwomImjES0ZnkIjGdV2DrPZDW63NDe02Ig5FavYba7xpzi3KoYocZmuo1JIXECOxx4IK6Oxc5v0ByQev3VBol1uWXWesznhmSIZ4rKA12Vyj4UI7w8VJ5Kxoy+EaMx8Gr+Q2xsJWkyDSjI7zGqEQ31NZnqcAUsXs4g3WKcuphvzng4x5c/t0tBY5UsVzDbxHjtRbpzxcCmL/lIFc1ONH5qmmaZmEkJIKaFgJo4mErNpychj0Z+dw2w0RSQ+nNXV/eq/QOaWDPm96jBnGFwSMNXKFUEwJ8Xx1t2DchFYNofZ6zaESE8wIq8n4xciaxqwd5WzOeEiarGzP2wSrYC5IcPUXKJ0Y8fjesGOcGOVD26XBl03agGB0TPpDzCLYPtIhnGZ+jrNFiMSVFgzzMnXZqUlwxzXzQNOsiE7mKv9cLk0VPs95oOu5P/V97Fto0zUeK/YHSB39poPbrPVGBJCSLGgYCaORjrMLi3PlozcFv3F47qpPxYwn962c1TtHOYqi2utYh1cAqSK7nxr5QBDHH+yM9F16/e4TPeRDnFf4jlS983aZmD9f7ZIRjrRCth3MANmgdcfjsoMc74C16VEGtQc82gZXAKYRXCmlozEz1XBnDlGJKi0jMe2jm1XY0P5dDFb30OAfROK+n+7pgwjkuFKE8kQTRzm9QbpagwJIaRYUDATRyMm/fk8yqK/HJyvnVIwZ45k7BoIIxrXoWlGXVyz6fR2qgCt8ntM0YbmWmXRn20kI7VWrqHSZ1rgVVlA5ZkQzBt39pu+z4YQVLsHDEFrOMw2kQzlOcgmOl0uTR4MWAeI2FXKifsWz0VfKDqkCEWzzDEn7kvXDcfa6Yv+AJjq2KzVbEDiwEa8ptWfpxOndtcH0i/6c7k0eUCVT45ZnfInaLFZ2Akg48GlcJj9HrfMJ6sHyHLRqDy4ZSSDEDI8UDATRxM2OczFacmQi/7CUel6NlYZC6iyRTIAs8tsdpjtephTF/2pI7Z9SkwiH0QkQ0wLzJZfFghxL3LePrdLjv+WWVNFwKviJRfXN52IsRtaAiQGclQrOWbxHBYSoWiRp+gTAm4wEpNd16MhkiFc45qAJ+3+ilhGQZEMy6K/AcvgEqCwajk7h1kIek0zZ+eN20/fWe73uuBP7qvdor8meXCbfpEpIYQUEwpm4miipgxzbpGMwXBMCoFG20V/hoMmhlyoH/TZFv0BhhsNJDPMPrMQEei6ntJ1KxDCslDnUzjKm3clpgXW5egwi/Hg6vhwgV0kQ93HnARzmoV/HUrtmBUh0HuDhsOcbaqgHUKkiYWc4rY0zXzA4lRmNlcDAPZuqUm7zTnzpmHOtDE4fp8meZm52SV7rVzQGslQXpvZxlfbYV0HACivb5/HlFmvztDEEVIyzOIgLRyziWTUms8GcXgJIaTUlI1g/uSTT/DNb34T06ZNQ0VFBWbMmIGf/OQnCIfDpm00TUv5evHFF023df/992PWrFkIBAI48MADsWLFiuF+OCSJyDCbF/1ljmSIeiuf22Ur8FQ3uEOZTiYYW+2H+HxPJz7UDHJzTQCVaVyzSExHLLmwzpovFvdZSBwDMMZjC0FRX5FjJCN5f7uSkQwRdQHUlgzzvh48qR7Vfg+mN1Vnvf3mNPVuRv40VTBXK67mUCIUQrCJDLPML1tEm1PZf3wd7jj3CNzwn4ek3eZLsyfi3gvnorHafJZDkCnKUmHJMA+GU89+iAMqu6mV6Wi3eR+JBYzW32MmB1s4zKbR2EqNZKflLEWmSMbb27rxSfLsCyGEDBXnn6PMkfXr1yMej+OPf/wjZs6ciXXr1uH8889Hf38//vu//9u07T//+U/sv//+8vvGxkb5/xdeeAFf+cpXcN111+Ezn/kM7r77bpx22ml4/fXXccABBwzb4yEJDMGsKRnmzA7zLiW/bCeSxAf24+vasOKt7QDMzpjX7UJjlR87+kJpa9SES1rj96DC55YC0+owq00DVodT3EahUQGrQM49w5x0mPuEw2zs19TGSgDA5ORQCMFfFn0Kg5FYTvsqDiZ29JlFTKfSoWtFuMmdvaEhRSiEYGuXgnn05JcFx+7dlH0jCxMbKlFX4UVNwGM6Y2BFCuZwHLquY8AmLlTlG0okI7XFw/p7zJhhFrVy6mjsWOKycDSO3cn6RPF7lpEMi2B+7oMd+PotL6GlJoDVV55g+3fgH29tx0NrtuJH/7EfJidf94QQko6yEcwnn3wyTj75ZPn99OnT8d577+EPf/hDimBubGxEa2ur7e3ccMMNOPnkk/GDH/wAAHDNNddg5cqV+P3vf4+bbrrJ9jqhUAihkPEHu6enZ6gPhyQRtXKePCIZmRb8AcDkMYkPR6O9woUF+7aYtvnG3Cn41/udOGhive1tCLHbVGt2ia0ZZiEAXFpC9JtvI/FhXznESIbxfX4ZZhnJUBzmc46ahtlTGnDwpHrTddzJqrBcaJLNBekyzOmbS4Qz7CowQtFiGZwymjqYh0KFz41/Lj4OPrcro5MuJ/1FoghF4/LgZKiRjHabuM3sKQ0YU+VLOQCoTR4ciWE7KurgEr+lJaOzzzhzJF774rW0sy+EWFyH26WhvSeI793zBnQ98XrqDUVRGzDeK9FYHL964j386V8fA0icPfnW/Jk5P9Y1m7tw3u2v4jsnzMSieVNzvp5TiMTiWPbk+whGYpjYUIHpTVU4dq8meNyFnXDe3R/GPa9sxt4t1Thq5ticmnoIGY2U9adId3c3xowZk3L55z73OQSDQey999644oor8LnPfU7+bPXq1Vi8eLFp+4ULF+Khhx5Kez/XXXcdrr766qLtNzEwT/rLLZIhnFO7/DIAHLPXWNxx7hHQkXBUx9dXyAV/gu+euBe+e+Jeae/DcLiMnCaQKjLUU95WISOy0gU7zBaBXJdjJEMIdHFgEVAcZp/HhcOnpr5n8sFOMIejcSnQ7QSzGGTS3p0cfewvLEIhM8y9Zod5NCz4Gyp22XArxmjsmDyYA8xxoXwX/Vmn/AlaagN49UcL5IJSgXCFrZGdeFyX8aKAzeCSDtnE4ZevjcZqP1xaojd6Z18IY6p8+M7db8jXduJ6ISmY+0NRnHf7q1j98U75c1FjmCv/fr8TO/pCuObRd3Do5Pq0B9WlRtd1fNTZh6mNVXmJ3Wfe68RNz35kumzBvs3409cPT/ldZWNHXwhf+/NLeK+9F0DCfJi/dzOuOe2AnF6PZPQgKljt2nv2FMomw2zlww8/xO9+9ztceOGF8rLq6mosW7YM999/Px577DEcffTROO200/Dwww/Lbdra2tDSYnYbW1pa0NbWlva+rrzy
SnR3d8uvzZs3F/8B7aEId8nj1uSHejaHeVcWh1nTNBy7dxOO27sJUxqrUsRyLnx6vxZ8er8WnH/MdADGKf/BSExmlgGjJsu64E/cxrF7N+HrR07J+/4BO4c5vwzzbpsMczGwE8winuFxaWiwccLF8ycc5kIFrqgy6xqIIBiJGQ5zgTnxckPNMIszLF63ZnoPVOWZYbZO+VOxE2CiCaSte9B0ufq+9ts4zOqUP4FbaZtZvuoDXPDX1/DyJ7tQ7fdgbPKAuUOpN3zgtS1Y/fFOVPrcOHxKQ16PU9CXjF1F4zouvXeNPCjOh4FwFHe9tDHnxYr//qAT67Z2my57eO02LLj+X/jtUx/mdd/ieZ/aWIlTDxwHv8eFf77bgeX/fN92+1hctzUpOnqD+MqfXsR77b0YW+3D+LoAgpE4Hn+7DQ+9sTWvfSLO578eX48jr1uFFz7aMdK7MmI4XjAvWbLEdqGe+rV+/XrTdbZu3YqTTz4ZZ5xxBs4//3x5+dixY7F48WLMmTMHn/rUp/DLX/4SZ511Fn79618PaR/9fj9qa2tNX6Q4iEl06qK/YJYe5h39RlVcqWis9uPP3zgcJyajHOopfzXHbDflT9BcE8Ad5x6Bk/a3jwdlw+oo51wr5zdHMlSHuRioGWYxXlkVO3YiqtqfeCzCdSw0QlFbYWR4O3pCo2poyXBgRDLiKWOxBenOlqTDOuUvG8Kh2t5tFosmx9uTOrik00YwA4YAv/ulTXhqfQcA4L++dJBsGlHzzaKC8etzp+CEfZsB5Bc9AYwJoQDwcWc/rvvHu3ldHwDue2UzfvT3dfjtqg+ybtvRE8SiW17GObe9Yrr87W2J6J9VSGejM3kG7qiZY3Hj1w7DdV88EADw26c+xOPrtqdsf/pNL+D4Xz9j+v30h6L4yp9exAcdfWitDeD+i+bh+SUn4BtzEwf/1t9tNtZu7sKC65/F08nfH3Ee7yfPInzY0Vey+/iwoxcH/vQJnHLDv0t2H0PB8Z8il19+Oc4+++yM20yfPl3+f9u2bTj++OMxb948/OlPf8p6+3PmzMHKlSvl962trWhvbzdt097enjbzTEpLJGqz6M/GYe4NRvDGpi7MndGYNZJRCvweF9wuDbG4joFwDDXJU8DWwRDFxJtsAREuaq4Os+jE7U2eii6VwxyKxmV+1K6NREUsmmwfosOsaRpaagPYtGsA7b1BJZLBXCWgjMYOx1LGYgvyjWTYdTBnQkwwbLMMthHva49Lg8ftShlcsjtNt/oPFs7CPa9sQl2FF43VfhwxdQyO3mssVr7Tltw/437akkJufF2FbMLJ12EWr6lj9hqLf3+wA3es3ohTDxyHOdMbs1zTQEzn3Jj8NxObdg0gricOGPpCUfneEO+VbV2Dma6egvXA44uHTcTb23rwl+c2YPF9a7FPay2mjU0s+h0IR/HGpi65r/u0Jg5Cnn2/Ex919mNstR/3XngkpiQXCYt/2/Os+Vu1vgMfdvThsbe24/hZzXldlwwPIi6VzbAaCj3BaLJaNHV9gxNwvGBuampCU1Nuq8a3bt2K448/HrNnz8att94Klyu7EFizZg3GjRsnv587dy5WrVqFSy+9VF62cuVKzJ07N+99J0MnErfpYbZpyfj5o+/i3lc3Y//xtRCJiHSRjFKgaRoqfW70BqMmx8qu57aY1Fd6pbDJNcNsdVuL7TAHvG7U+D3oDUXR2ZvIjxoOs33+TbRkiO2GkjluFYK5J6hU1Dn+T92wIAeXRKK2A3WA/Bf92U35y4RwmLsGIhgMxwwRLxoyks6y3xLB6kouErRm94/eayyO3mtsyv202NQbbk/u67i6AHqSB4x20zkzId5vC/dvRV2FF4++uR3Pf7gjL8G8PRmLsE7DtEM9sGjvCaI6We0oFrZuzVMwi3iU2iV/5Smz8Mam3Xh9UxdWvLUd3z5+ZvL+jOeuszckBbM48JgzfYwUyYBRJdiRw+NSETlya8sQcQ6RaOKDVUwJLQXidSDOODqNsvkU2bp1K+bPn48pU6bgv//7v9HZ2Sl/Jtzh22+/HT6fD4ceeigA4G9/+xtuueUW3HzzzXLb733vezjuuOOwbNkynHrqqbjnnnvw6quv5uRWk+ITkZP+Mi/625b8ABKnKYHhFcxA4lR2QjAb+yf+uJRq5Xh9pRdbdg/K/+eC1VEstsMMJNwrIZhnNFUbDRlpXEghaEX+eyg1cOI+2ntCSq1c2fypGxJGrVxMaYkxP9diUWh/js5rvg5zbSAxWn4gHENbT1C6mcGoeX+sg0u6BoRgzu113iSHmiiCOSkux9VVIBZPuLv5RjKEwK72e5Kxj+0pbnk2hOC0Lny0Q92mvSeIGUIwJ13c3mAUvcGIPKuVDeEwq4LZ43Zh3oyxeH1Tl9w3cX8C1alP13jTorz38kH8DuymPxJnEBIOc5ZF90NBHIzWOPTvtTP3qgBWrlyJDz/8EB9++CEmTpxo+pnIUQKJmriNGzfC4/Fg1qxZuPfee3H66afLn8+bNw933303rrrqKvzwhz/EXnvthYceeogdzCNEVPQwe7SMtXLCnZrZXC0zVuOTp36HiyopNJQMcxpRUizU3HKutXJW9zZTb2+hjK3x4+Md/fLDuTPD0BK7fRqKw2A4i0H5AcxFfwmMSX9x27HYQP4Os92Uv0xomobWugA+7uzH9u5BQzBHzAeXxuCSxH52DyYiGbkO6FFfB0Di4FtU07XWBeTgHrtx9pnoU3LxrWmG9GRDZHy7BxOLUzP9fVDdWnUEuOlAoDuYs2AWDrM1C94iFmP2pBPMyn3LiJX5d64+57qu59x0IxZS5nvwQoYPsZaglJEM6TAXMOV1OHDmXhXA2WefnTXrvGjRIixatCjrbZ1xxhk444wzirRnZCiIHmavy5Uxwyw+bH/4H7PgcbmwefcADpgwvIsvhYupnlY0TnuXZn2tGIftcWlyPHc2rJMFSyHmrcNLhKCwfsAKUgVz4ftkuFxB2TM8mgaXlBK1JUO8NlMyzAUu+kt3MGTHuKRgVt3MkDK0BIAyuKQwh7nZ0tbSkRyK43Un2jzSjbPPhjoMx+s2D8rJBVW4i+tOsQwKUmm3EbCD4ZhcgwAkYhmZxqkLdF03BHO1ZfGkmJLZnUag24h16+9cXb/QPRjJ+SBePKcUzM4lIjPMpYtk9Dq8BtSZe0VIkrDdaGybI1zVyZ03IzXPOBwI4aGeVkyXEy0WDUnxUF9pP9XQDqt4LIXDbK2WyzQWG0g/Da4QVJdLiD+n/gEebmSGORy1HYsNDGXRX+79rK21ibM/aptCMHkgLM4kWVsyRIa5Lsc2mGaLwyzq1FpqA3C5NGPYUIGL/qp8HoypSjay2IzmTocQ7oL2nlBGwWx2fM3vJ0GuC//6QlEpeMbWmJ/H1iwOsyry5fvZEsMJeN2or/SiayCC9p5QzoK5r8A8ORk+xPvQ7vO3WDjdYXZ8rRzZs8k3klEqYZoLQpQNKEJjMM1p72IhIhm5um7AMDnMVsHck1uGWTCUP5jqtL89ZdJfrgSUSEZ/ulq5PHuY7ab8ZcPoYlYEszJ
5E0DK+71Qh3kgnOjj3tZlNGQAyuPM09VUX1Oi93tXfzjrQCWBtX86W/5ZdXZF+4RVoG/vys3h3pFsEKryuVP+DgjBvKMvJN3ENlMcxC7DnHqQ1FJjPlDJhT7pMDPD7FQiw5JhTrzHnZphpmAmjsYUyUg6TtG4LoW0IFjirHAuiA8g1ZkreYZZOMw55jqBYXKYk6d7O5Mji8Vp4HSRjJqANZIxdMHcprRk0GFOoMYvupIZ3qHUyqWb8pcNuy5mIYwDNg6zrutGhjnXAT1+j6mCTYhzcd/i/ToQiSGuDBvKhK7rpumR9ZVeuZ8dOeaYrR3F2Rol2m1Eq/W+cnWYZUOGzcHNmEofvG4Num4I4g5LSwaQWHQtDl7szhg11+YfUxHv01zPapDhx8gwl7Alw+GRDApm4mgiMpKhmdocwimCOfH9SDrMRoZZackosfM9eUyl6d9cSKmVK6HDvKMvhJ39IcR1QNMgp69l26ehLNJTncW27qENQik31N+1GFyTqVZOXTBtR6Ypf5mQDnOPIfSCaTLMoWgMA+GYPHjOtT4RMMRbR09IClVx3+LAUddzd82Ckbisrazyu5O936KNIzeB2GYRzNbvVXqDEVNkxBrJ8CQHxeRaLSc7mKtTha7LpRkHm6L2zqYZQ9yGz+2yPXiR1XJ5xFSEsxyOxuXffOIsxGduIZMtc6WXkQxCCkfWyimDDIDULuZSO7m5IBYR9dss+ivVfp24bwv+suhwXPWZ/XK+TqW39A6zqKzq7A1Jl6qxyg9PmjHkxcwwV/k98pSecNS46C+B26VJR1SMRg+kcZjjenY3Kd8pf4JWm0iGyEYKh1kdjd2dzC/73K68Dj6bawwxK8S5uO8Kr1sOL8k1CqA6oOKgTkQQxMFZNoRwF4+vPYOwtLZviPYJcfm+42pNt5kNuw5mldZa47Ek7se43b5QFAPhqGlqp926CWs7SS6ozysX/jkT6TAPQ60cHWZCCiCadJV8bhc8bpd0VNQccyQWl/29I+kwV9rUcaVbWFUs3C4NJ+7bklfntEdZQAkYAyKKieEwh3NqUfB7jN8tkBrRyBdRkSVw6h/gkUC8Fnf22TvM6gFVtkVY+XYwC8S0vx19RvZXRjKSDrMayRARgLpKb86LWxP7ZeTZDYc5cd+apknRm2tThnhvV/rc8gAhX4EoDhIOmFCX9XoigjGxIbHPoWgcPYNR6TAfMqkeQGIQSi6xknTjxQXqwr+eoLFAUJgV6gFwuvUILXlGMsLRuBRjQP6LMEnp0XVdmfRX+sElQ/37XyoomImjCUuHOfHhZDe8ZFBZtVuKIRy5Ui0XEaktGUkRUKJFf4WiOrilcJjFWPJYXMd77b0A0n/AAknxouzTUCMUVgFnXeC0JyMyyyKSYc0wJxokkmdLsrh9+U75EzTYZH+NSX+WHuZoHF15djALVIdZLIwbpxxMGY8zN5FmNzlSCuYcIxliyt/BE+sT18sgLMVtTmmslFGU9t6gFL4HTqiDS0us9djRl93hzt1hHpT7VVfhlUK6ozeUtVO9Wa4hyM1xt77G6DA7j1hcl80uJW3JkA6zMyf9UTATRyMcZq/bflwuAASTjoSmlUb85YpRU5W66G8knW871IiCv8ijsYHE70u43u8kpy9m6+lVXeCh9DADxmlyu9ve0xGvRRHJsHtt5rrwzxhakp/DrGmaFK7C+TUGl5hbMqJxHbv782vIEIhFptu6g9KVVQWzXXd6JoSwrjYJ5qSjmmMsQjjMh0yuT1wvGbOw3zbp4NcETM6tOMgYVx+QIjeXHHNnb+J3bq2UExgOc0gK5pZav6nTOlNDRmJ74ern9nxYX2Nc+Oc81DVDpRxcwgwzIUNAXfQHKA5zRH0DGwv+8jldW2zkpL/Q8C36KxR1UV2gRK68WFhkCObMLqRZMA/NYWhWGhtcWuke42gkIAVzxPS9irHwL/OHYyEdzAIh9ITjas37+5SDXyF2c+1gFoizGm9v7UZcTyySa1TcVaM7Pb9IhnrA2ZLHtL9YXJeZ5YMnJiIZwUgcPUH7+5dxptqAKV7SLl3eAMbVp3Zap6MzzdASgZEtH5SPp6U2oCyeDGaNWBmLIEM5xUSssR86zM4jEjV+j0GbWtdiwQwzIUMgEjc7zAHpMKeK0pFc8AcoE9LsJv35nPVWU0/Dl8JhBgwXa8POfgCZIxmA2VUY6iI91fGs8ntG9EDKaYhOcJn7t4kLVdosYLWjkCl/AmsXszhrJM4iqYt8hXgr1GH+ZOcAgIT4cytZeSPDnN+iP/WAM59Ixo5kzaLbpWFigxKzSOPGigOF1lq/fCybdw+Yat3GJwVzLtVyO3rT18oBantJUHGYA1Jgd6gOc5r3c1O1H5qWeH3tTMZ+MpEayWCG2WmEYqnDuIqNruvy/cUMMyEFEIkaLRmAmmFOPUU00i5uVYZFfyMt5q2oGcxSO8zibHM2UWXKMA8xc9yqOJ5OdStGCmtm2fo9YP9atmNIDnOd2Rk1MsyJ16PXrckWCyEc880wW6Mi4yyLQSv9uWW1Bf02Dlg+kQzxWJtr/HC7NPk6TSeYxcFES60RyVi3NXHGRtS6ja/PLZKh63pWh1mK/25LJEOpiuvIMube43bJjHQuC//6LAKZDrPzEJWOQOJ9mq1ushCCEWPxvlP/ZlMwE0cTjdtHMtSj3EFLf+tIYbeAyCnutxVVkJbKYbauxM+2MExUwVUpDQSFokYy2MFsxvpazBzJyOYw5z/lT2B1mIOyJSOxP5qmmdoZgAIcZouQb7UI5oIdZptFf/3JiYKZEP3GYj+MIR/2cY522UhhRDLWbe0GYNS6TRCRjCzT/npDUdlGke731VwTgKYlMqvvbu+Rj6/JJsOc6XeeTzd1nyWOwvHYzkNtMYnrqXMQikFvcsqfptkfxDsBCmbiaCLWRX8247Gd4jBX2ywgcsJAFTsqlchDyRxmywdqdofZnfx36ALXGskgBtbXYuZFf+mFZKFT/gRy2l/ShQxZRmMDRo5ZNjZU5pdhrvZ7TB++KQ5zjtETgTgYViNDau93NkfVOjwlk8Os67oRyagzHOY2mWv2J28rGcnozuwwizhGtd+T9gDe53GhsSpxu28lhXlzTUC+d9u6g/J3nililU83tfWgjIv+nEfYklsuRbWcOHCqdnCEjoKZOBpj0Z9oyUitlXPCWGxA7WFO3TenCWaTw1yifUt1mLO1ZHiT/w5d4Kqni4fauFFupPQu28RfxBCegQzipdApf4JxygIzwHCY1TMe4v/CYc5nyp9AFfMiBiKQLRn51spZnrPmHGMZcjx3bYVp3+wE8+6BiDQMmqr9KW65ELEikpEtw5ytg1kgfi9CFLXU+uV1Pursg64nFtIKYW1Hc5aoiYpVIDOSUXz+/UEnvnnbK9iZQ/WgHdbpi6WolpP5ZQcbHBTMxNHISX+u9C0ZTmmiqE5+iIZjiSJ+XdeNfXPYKSaTw1yiKj6167Wuwpv1gKa6iA5zwilLiLih5qHLDetrMaPDnMF5LXTKn0Dt9o3E4sqBrzJUR04lTNbKFSCYVYE4fogOs10kA8h94Z
/VYc405EOI68YqH3weV4qLLw4KRSRjR18444KsHclBNenG01sfi/q9uK9oMmM6ttpvWjyZehu5RzK46K/0/PzRd7FqfQcefXN7QdcPDafD7NAFfwAFM3E4ctKfJ1Mkw9zfOlKoQmQwHEMkpstFDCPtflupHmaHOZcWBfGHslgLPoTLxUiGGatgDtg0uOSy6K/QKX+CsVV+eFwadD3hflpHYwPmajkg/wxzYv9Uh9mSYc7XYU5Te9WaY7WcdJilYE4/5EOIb7GNdaGeeN7rKrxS+LdlcLjF0JJcHWYgkSdtqvGjscpnEsjZGm9yfT6A1IMyOszF5ePOPjk8SnSv54vVYS7FeOxeh1fKARTMxOGE0znMaq2cQ5oofB6XXKTUF46aJhCOtPttpdKvLvorbUsGkP0DFjBO8TZmccByRQiKoVbUlRu5ZJirbTrFrRQ65U/gcmlSDG7tGlRGY6uRDItgzrOHGTAfrI2zRjLyzjDbO8xyul22SEaP1WFOP+TDOhRGPWsCGA6zOgQmUyxDRDLSTfkTqAcVjVV+eN0uuFyayZnO1qmez7hw8ZyKQUeFLvrrD0Vxz8ub0J08G0ESPPF2u/x/V4HPTWqGufiCOd17y0lQMBNHkzrpz2ZwSdQZkQzAEGcDoaj8o+LSjJYPpyCEgtulyee22DRUGq5Utg9YADjlwFZccfI+uOzTexfl/sXCI6eOWR0pchHMcmplJoe5wCl/Knu1VAMAfvT3t2Rnr99m0Z+griCHObF/bpeW4q5W+vNrybBb9KfeR6YIgq7rKQ6zGkuJWYZ8yCl/ikOu5piblOdddDF/2NmX9v6zjcUWqJWM6u9WfQ9nO2OUrf1DRZyKF7eZaaFpJv735U1Y8re3cPFdr5Wk9my08vjbbfL/RXOYSxHJcHgHM0DBTBxOyqI/u0iGQxxmwBAafaGodL5HegKhHUIolHKUuOpK5RLJqPR58K35MzGjqboo93/64RPxqakN+MxB44pye+VCQIlk+Nwu2XGuImvlMrh9Q+lgFiz9zH5oqfXj/fY+6YCaIhnKvrm0whYECaHXUpOau63K4cBARTwfhUQydvWHEY7FoWnGPjVW+eCSQz7M17VGMhL/t485HTyxHgBw7WPv4un3OmzvP9dFf6rDrN53PhErcb2d/aEUsWVFCGRxnUwLTTOxZXfCXX/ho534x7q2LFvvGWzrGsTazV3y+90Odph7g4xkEFIwuq7LRSYpo7HVlozkm9kJC+ukwxyOGc63A/bLijjlXuqDDOFmFdLTO1Q+NXUM7r9oHg6YUDfs9+1kKpXfebrcvzgturMvnHa88VCm/AmmN1XjvgvnyoVr1n1S3ea6Cm9BiwsPmFALADgoKSpV5OCSIS76S9cK0TUQxo1Pf4gf/f0t/OzRdwAk3hPCOVeHfHRYxHZHj41grrEXs5ecMBMnzmpGKBrHBXe8isdsFnfl7DDXZRfoTVkOksZU+kz59EyIgxVxX4XWyu1Spgr+/NF3pGHx9rZurHhr+x7pOj+ZdJfF52dXgQ6ztXd5sIQtGU4+I+hcKU/2eNTpQpkm/ckMcwnd0lxRF0uJhThOcL6tCCe8lA4zAOzTUoO3t/Vgn9aakt4PyR31AC7dwdz0pioAwPq2Xnz3njfw32ccnPI6zjYiOVemNFbh3guPxDf+8jK6ByMyXgCYHeb6PDuYBTOba/D8khNs2yHk4JIhLvqTkYyeEHRdR89gFH957mPc+vwncjGTYMqYStP3rXUBdPSGsGZzF7oHIxgIxzCjqUo2aqiiVfzf49IwRnk+Al43bvr6bCy+by0eWbsN3/nf1xHTD8XnDh4vtxEtGVkd5lp7Ua4K5pYst+FyaWiu8WNbdxBtPUHT79SKOFgxBsAMXTBv6w7i10+8h0gsjjtf2ghdBx769lE4ZFJ9Qbc9WhFxjE/v14IVb7UVHMkYDod5NLRkOHfPyB6PmPIHGB+cotHBrlYu4AAnV57iDUdRHUn83wnZait7t9SgrsKL2VMaSno/PzvtAJw1dwoO3cM+qJyM+nq062AGgBlN1fjV6Qfhh397C4++uR3bugbx528cjkbFnczVscyFiQ2VePKyYxGJ6SYRr3YyF9LBLJiQRrAVPrjE4jAnnd9wLI6/PLcBNz79oTz9Pau1Bgv3b4Wu64jrwGcVEWtctxtXPbTO9j7tMsxjbar8vG4Xlp95CKp8btzzymYsvncNKrxufHq/lsRYbLnoL/OBR5Xfg5qAB73BqDmSkSZLnY7m2gC2dQezOszCWRS3qS40veuljXh8XRsuOX4m5kxvzHg7QjB/5YhJ+N+XN+OW5zeYfr5p14CtYNZ1Hc99uAP7jqstymvZKezsC+HlDbsAAP/5qclY8VYbuvoLjGSk9DCXMMPs4EiGc/eM7PFEoqrDnCGSYVNHNVIIF2bLrkH5Ae/ESMaYKh9e/tGJJgevFFT7PThscmlFOcmPgCmSkf61+eXDJ2FiQwUuvvN1vL6pCzes+gA/+/wBAIB4XMfOHB3LXPG4XbC+hdVFf4VUymXDrlbuf1/ehK27B3H5SXub1h7oum4MLrEs+hMNFjv7w/j5Y+8CAPZqrsbiT++Nhfu3ZoySHLdPE1atb4ff48LEhkr4PS581NmHYCSOugovpo6tkttOSrrTk8bYHwC4XRp+8YUDEY7G8bc3tuLbd72OH392P/QFo1L05CIKJ9RXYH1br6liztR6k8PvXNzPjizDMmQkoyY1knHzvzdgw45+/PuDHfjiYRPww//YN+3+G4J5MrZ2BfGv9zsxvakKFV433t7WIycdWln98U58/S8vY+H+Lfjj1w/P+rhGC6ve7UBcB/YfXytjab2hKCKxeN4LvSNWhzmPWrlXPtmFq/6+Dj/53H6YN2Ns2u166TATUjjqUW1qrZzNaGwHCNNZ42qAN4B323owPbl4zYmRDMDs3pE9B3VUdEWW7vJ5M8bix5/ZD9+/fy0+aDcaGLoHI3J9QaaJb0NFFcxDcZjTodbKiYzr1Y+8jWAkji8eNkG+h4HEugQRg7VbmDS+vgI7+8Oo8Lpx6YK9cO7R03ISJl8/cgrOmD0Rfo9LCvRYXMeW3QOoq/Ca7uvomWOx9DP74cgMbqvLpeFXpx+EgXAMj7/dhh8rznVrbSCnv0c//I998ez7nThqpiFw1OhNLqJbHEhldZiD5khGOBqXok6t2/vb61vx3Ac78K8rjk95DLquY1cybjCmyoc/njUbL3+yC0dOH4PrVqzH29t60JlGuIvFgh919md9TE5E13X8dtWH2Ke1GicfYCxwfmd7DwDgqJljUVfhhaYBup547+brpFsd5nwiGY+s3Yb32nvx+Lq2jIK5L5Rwv5286M+5e0b2eEQkw+vW5AdJpsElTog+7DsuscDo3e29OGGWc9o7CBHkkmFWGS+rzwzxIlzDugpvSvVbMVEz9oVM+cuGaIuJ64m/KQPhmPx70tYdNAlm4YRqmv3fmiWnzMI/323HN4+ehokNlSk/z4T1b4TbpWFKY1XKdm6XhnOPnpb19jxuF274yiFY8uBbeHd7D2Y0VWNmc
zUW7t+a0/4cu3cTjt27yXTZjLHVqPC6MXlMZU6/86Zk9COTwxyP6+hPrkFRBflAKAaPOy5/dtd5c3Duba+gozeEbV2Dpt8LAPSHYzJn21jlR4XPjeOS+y+EezqHWfxeswl7p/Jeey9+88/3MbbabxLM4v06ri4At0tDbcCL7sEIugbCeQtmdT0RkF+tnOgGz1bdKBf90WEmJH9EJEN1aUROeUDJHA7ajNQdKYRg/mRnP3YnTxFmc/EIGU5UsVfhzf4RILKlHYqgyDUPO1RMDnOBi/4yoT4X/aGo6TFax1zLhgyfx7Ym8qiZY02O7Ejj97jxmzMPKdrt1VV68fT358tmkWzk4jAPKE5lfUViBHg4GkdfOCpjAJU+N46aORatdQFs3DmAnf1hTDdrefm3NuB1pRwEjs0i3IXD3T0YQTASG3UGh1gcuqMvZNp/0boinPv6yoRgLqRaLnU0du4O87auxP4NZFknIH4PTs4w85OcOJZI3DzlDzBKzUXeCVAyzA74Qze22o+x1X7oOrBmSxcAZzjfhAjU90kuDrNw/nqDRrd4Z45jloeKr8QOs9ulyffnQDhmqoWzdiqnG1qyJ9FaF0BtILffg5FhTt/MINxdt0tDwOsy+r+VgxfxGhNTDnfaCF8x9MYuHpRtP9TR3Nny1k5EPSAxvX57zS0romVmd3/+TRnWlox8auW2dycc5kxTQ4HR4TBTMBPHIgrv1Q/NWhvBPOggwQwA+45LVKit2dQFwBnZakIE+WSYgYTjI87eiNO8QnyUulVAzdmXYtEfoFRBhqOmLmRrp3K6DmZiTy4Os+HaJ4Y7iddmXygqrycWGDZmEL5CBDZUpb5Gsi0+VIfWjMZYhvq4xCRJXdflAZ9ocGlIvn8KGY9d6KS/wXBMOtrZHGYOLiFkCFjHYgNATdLd6A0ab/qgMlHPCeyXjGVsTWa3nCLkCQHMr8d0tXIqmqbJD13h+hWzUi4T/hK3ZACGY9wfMjvM1kEiA2mm/BF7cmnJ6LOIpGqltUQcnAnhLaIVO20Es3CYx9g5zMnr7+wL2w4v6QuOcsHcazwfbcnXb/dgRLrC4gxRg3CYC+hiFrclDpxDOTrM25LuMpDZYQ5H4zL2UePgwSUUzMSxiJW5olIOgDwd2BM0VrUH5ZvZGcJ01jjzkA6nCHlCgMTpb3HWJtf3jDqYA8h9zPJQUWsP6ypKk5cWBw0D4agpt5zWYc7hIIMYr42BcCzt6PF+i2sv/jU7zGKMeFL49qeKWuEwj7E5qBJRjnAsjp7B1P3oU4RcuiYNJ9Np4zALd7mh0ivP0ogDzkIyzMJhFp+/udbKbe8y3kOZHGb19eHkyBMFM3Es9g5z4g9qLK7LKMagwxxmsfBP4BQhT4hAvFdyfc8I0SJEpHANm0rtMHuHwWEW1XKhqCm3bF30l25oCbGnyu+Rr6+0C+7SCGbbDLNwmG0yuDtlJCP1oCrgdcvPjc6+YMrPR30kQ9ln4TC324xVFw5z92DhDrN4HnONZJgc5gwtGeJ1UOF1y6m+TsS5e0b2eMRRrddlvEwrfW64k4sAe5MuszjaDfic8XKe0VQNr+KKO0XIEyKQgjnH94wQLSmRjJoSt2S4S7voDzCq5fpDMVPvb3tyzLXAGIvN93OuZMsx94etkQyjF9t6FkNkmO0W/e2Wi/7sX4/iwK6zN1Us9o1ywaw6zO0WwdxsEsxJh7mAaX/ibG9t8j2Ya0uGyWFOc5YBGB1DSwAKZuJgpGD2GOJT0zR5lNszGEEoGpfDBJzi5HrdLsxsNmIZThjZTYiKWFxVkWO8QOQg5aK/3uFZ9FfqwSWA4TAPhM21cuFoHN2Dhrjgor/8yVrplnTthWAW8Zj+UEz+LsSiv7FV6TPMYmiJncMMGDlmu/1QHeaOUSiY1cckKubE42hRIlP1Rcgwy0hGrhnmLsNhHojEEI+nZsiB0TEWG6BgJg5GlKV7XOaXqRTMwahppr2TnNx9lRyzk/aLEMA4uMz1tdmSjGR09oYSY7H7h2vRX2L/qv2ekp2qFSKtV4kBiCZLNaJhOMzO/lB3ElkdZstBiForl9Zhtolk7MrRYbYTzL2j2GEOR+Om1ov27vSRjPohtGSkOsz5RzJ0PX32WU75o8NMSGFERa2c5YNSrKLtDUZkjtnt0nIaQztc7KfkmCmYidMQH0y5ij/hMLf3BNE9GJEHs43DNLikVO4yYCwy2rxrELG4Dk2DnCSnLvwT8QE6zLkjDqg603UgB80xF/G76AlG5EGZXPRXbTikUUvNmayVSzPcJpPTPZozzNYFkB29IcTiuiKYjQPaobRkGIv+RIY5x0hGt/06ACujoVIOoGAmDsauJQMAaisMh1m8cZ0mSme1KoLZIdlqQgTfOWEmvnLEZBy7d26T6dRaOXUsttqTXAqESJpQX1Gy+xAO84YdfQASIm988v5UwdzHRX95Y+cwr3q3HWs2dwFIv+hv484B6HrC6R+TdI0bKn3QtIRTaW16kINL0hzAyYo7S4Y5FtdNI5s7+0K21XPF4FePr8eJy55BVwGCNR1qNMqlAdG4jp19IaODOY3DnO9jNBb95R7J0HUd25VIBpC+KaNvlJy9cfbekT0au5YMwNzF7KSx2CpqJMMp2WpCBMfs1YRj9mrKvmESkSPtGojIfvFSj8UGgEMn1eOG/zwEB0yoK9l9iAzzhh39ABKuXItlkSOgxAe4JiFnrF3MG3b047w7XkVDpQ+v/mhBaq2cPHjpl9cXi7zdLg1jKn3Y2R/Gzv6QFOPRmJE1T+swp8kw91sEXDgaR08wWpIzGn9/Yyu2dwfx6ie7sWC/lqLcpmj9aK1LCOaO3hDaeoJy8apdS0Y4FsdAOJbXgV84+VkszKpgNHsko2cwKpsxavwe9IaipoMTlT4u+iNkaMhFfxaHWR2P7bQpf4LGaj/G1SX+WJXydDIhw0F9pVdGo97Z3gOg9B3MQGKR7+cPmYAZyYhEKRDCQbhyLTUBKTTMDjMjGflidZjf3NIFXU9kjj/e0ZfSkiGe2y27B0zXF4yxWfgn3GZNMxa2WUk3REUIdo/LWExeilhGPK7L293ek1ptVyjCYW5SPm+2dQWNRX9KJKPS55bv4a7B/HLMhTjMIr88psqHMcmD62wOMxf9EVIgkbi9wyyHlwxG5BvXaYIZAJadcTB++B+zTHlmQkYjmqZJ8fLOtoRgLvWCv+HCOiihuTYghYYpwzxKThs7CatQFQdbALB2c3dKS4bIMosyhWaLYG60ySKLTG59hVe60an7kbieVQyrDndzDqO8C2X3QBjR5INq6x7MsnXudCoTN8VB3jvbexBNZvHV96imacbwEpuFk5mwyzBni3VsTz7OcXUBU/uJHayVI2SIRKIiw2wVzIbD7NQMMwDMmzkWFxw7A5pm/0eckNGEWPgnRE+5CGbrePCWWr/Mftq1ZNBhzh1VhOq6jne398qfvbmlK+2kP4HVYTa6mA3BJ/6frlIOUIW7eTy2utjM6BovngMsUKM91oVwQ0GI+7E1frQmHeY3t3QBSExGtJpNIpaRb1OGrJVLni2N
60aLVTq2JjuYx9VVmKob7RgtZ28omIljicbTRTKMDLOot3GiYCaknBDiR+RLhyOSMRxYHeaWWiOS0WFqyYjZbk/SI4RqKBpHXyiKd1WHeUu30pJhL5jFYlN5eyKS0Z/qMKerlAOM12o4lsgoC/oVh7tJqU4sNibB3FXESIYycdMQzN0AzHEMgTEeOz+H2drDDGQfjy0W/I2vD5iGA9khXgeMZBBSIOII1mvpYVZbMsRYbL/DFv0RUm4IESkMulKPxR4u7BzmFjmoJSSHLTCSkT8VPrd8vt5r6zWJ0Xe296ArOaZZHIRU+QpwmLNUygHJ8djJ/VDjHGr/r5wGmGbIylBQD7zaiphhNjnMyfen6KRWF/wJjKaMwiIZ1X4PxAnTbDlm4aSPr8/dYWYkg5ACsZv0B1gc5qhzIxmElBPWPGmpx2IPF1aR1lwTwNhqPzRR09UfRlypH3P6aWOnIfLD//pgBwBgSmMl6iq8CEfjMvJiOMyWPHmaDLM6vGR3lko5uR+iKaNXFczG7zTbkJWhYI5kDBatus7kMFsEsp3DbHQxFxbJ8HlcCCSrJIPhzE0ZYsqfKcOcpiWjVx6MOnuBPAUzcSxCMFsn/dVKwWw4zE5c9EdIOZFyerxcHGabSIbX7UJjlbHwT60fo8OcH0KI/vuDTgCJoU4HTTTXBFon/VmvKxC/k52KC7wrB4cZUIeXGGLbOGvgLqlgVm8zGDGPXC/G7TbV+GQkQ2B9vwKFj8cOK41VosI1WyRDtGSMr69ApXCYQ2kc5mDS6Xf4e4uCmTgW0cMspn0J5GhspSWDDjMhpaXJ4liVi2BWHWa3S5NZWCOWEZTZS7dLg9/Dj818EK+TtclhJfvaCGZxKj5rhtnGYRaCeUyGDLO6H+ZIRuqiv1ILZqA4C/9C0ZjMY49VMswCu0hGQzKS0Z2Hw6zruhTMPo9LmlOZIhnxuI62brHoLyAPStM5zLJWjpEMQgpDTvpzpe9hFov+nDa4hJByI93p8dGO6jA31/jhSv69aVGaMvqUoSVsvckPIURFVVxCMNebthEHLV63y2SQ5JJhzlcwd/amCuZS18pZmzfaiiCYxXPgdWuoq/Ci0ucxCc7MkYzcHeZYXJfrFnxulzSnxGevHTv6Q4jEdLi0xPtI/H7TZpg5GpuQoZFu0p+otukLR+Wp0gCnbxFSUlTHajjGYg8XlcrZKXWUsDq8hAv+Csd6JmLfcTU4WBHMFV63qT9ZLBCr8XtQYfm7Lg7S+kJGpehQHOZ+G4d510BYxgHFgs+hIjLMYojVtiJ0McsFf9V+eRA3ri719atitGTk7jAL4wpIOMz+HBxm0QTSXJOIN4lIhl1LRjyuS+eZi/6GkalTp0LTNNPXL3/5S9M2b775Jo455hgEAgFMmjQJv/rVr1Ju5/7778esWbMQCARw4IEHYsWKFcP1EIhCtkl/um7k0RjJIKS0jKn0ybM9wzEWe7jwuF0yZtGiOJrG8JIQdg2INgdnf6A7EdUlrg14MKG+Aq11AenoWp9T8b1dbWGN3yOn1YlYRs6CuSZ16InqbDZU+uB2aXIS4Y1Pf4gDf/qE7DUuFF3X0ZFc3CiiKMVwmOWCP9Nr1hDJzXYOc5XoYU51mMPRuFzcpxKJGgcNXrfLyDBnEMzvtSf6tsfVJ/ZH/E7tHGZ1fYB1Aa7TKCvBDAA/+9nPsH37dvn1ne98R/6sp6cHJ510EqZMmYLXXnsNv/71r/HTn/4Uf/rTn+Q2L7zwAr7yla/gm9/8Jt544w2cdtppOO2007Bu3bqReDh7NMZCA/PL1O9xy9N2nclTXVz0R0hpcbk06dKVS35ZIJzjFhuH+e1t3bjq74m//zObSzeiu1xRXyuzxtVKN1TEMqotiy6rMwhmTdOMpoy+xDAUcTCTfdGfqI0zxKIayVDz6899sAPXr3wf/eEYXvp4V24PNA19oSgGk+JSCOZiZJhVh1kgmjISj8Wmh7nC3mHuHojgiF/8E+fc9nJKg0colth3TUvEI2VLho24BhKu/fKV7wMAjpk5FgCMRX82GWZxmUtzfrTS2XtXADU1NWhtbZVfVVVV8md33XUXwuEwbrnlFuy///74z//8T3z3u9/F9ddfL7e54YYbcPLJJ+MHP/gB9t13X1xzzTU47LDD8Pvf/34kHs4ejYhkWCf9AUZThjjVRYeZkNIjXKtyGVoiEDlmNfcp/v/mlm5s7RrE9LFV+NnnDxiR/RvNqK+V/cbVyv8fMikhHvNxmAGlWq4vjIFwTLqiWWvlRCRDySgLd9Mq0q9+5G3EknGM3jTNDjv7Qvji/zyPH/79rYz3Kz6jqv0eTB+bOOBSHeb32noLcpx3yLHYxuMWC/+aqv22Y8JFS0ZPMCIfHwC829aDroEInv9wJ17f1GW6jqyUc7ugaVpWh3n5P9/Htu4gJjZU4OL5MwEYzrHdoj857dHncfz6gLITzL/85S/R2NiIQw89FL/+9a8RjRov9tWrV+PYY4+Fz2e8wBYuXIj33nsPu3fvltssWLDAdJsLFy7E6tWr095nKBRCT0+P6YsMHRHJ8LlT30RiPLY41eX0I1NCygHRWlBuDrP4QFczzGpDw8SGCtx1/pyyO1AYDlRBt++4Gvn/4/ZuBgDs3VJj2l4IZrtaNMColtvRF5JxDL/HldU0ERGQHUlnGkhdbCZ+v+o0wL5gqmDWdR1X/u0tvL6pC3e/tAkftPembCMQn1HNNX4ZUdiezDBv3NmPz/7uOZx72ysZ912w7Mn3cMoN/8aW3QNKpZziMCcFs92CP8DIMOs6TNV26kLH/315k+k6YoCYiMJkasl4e1s3bnn+EwDANacdIDPo4oDUrlZOOMzWekcnUlYq47vf/S7uuecePP3007jwwgvxi1/8AldccYX8eVtbG1paWkzXEd+3tbVl3Eb83I7rrrsOdXV18mvSpEnFekh7NJEMDrPIMYtTaoxkEFJ6ZjQnzthNb6rKsuXoYmpj4vHs22o4oNPGVqG+0otxdQHcdd4cjKurGKndG9WoB1f7Kg7zgRPr8PySE/Cr0w8ybS8iGmkd5iqjWk4I5sYqX1Z3Uh3TLVxjNZIBmKdXiniDmAaocu8rm/HkO+3y+7te2pSyjaBTyRqL19D27iB0Xcez73ciHItjfVsPorHMg0AA4P5Xt+Dd7T1Y8uBb8nbV5/fomWMxvi6Azxw03vb6XrdLTjxUmzLUXPejb24ziWl1aAmQXjDH4zp++Pd1iMV1nHrgOBy/T7P8mdGSkcFhHgXrAxwvmJcsWZKykM/6tX79egDA4sWLMX/+fBx00EG46KKLsGzZMvzud79DKFT8mhiVK6+8Et3d3fJr8+bNJb2/PYVImgwzYDRlCCiYCSk9lxw/E3/8+mx8+fDyMgWWfflg/ON7x+BApR+4yu/Bsz84Hk9dPh9TGsvrAGE4CXjdOPXAcZg9pcEkmAFgQn1Fyt/3Mw6fhDnTxuCUA1ptb0/NMG9NTpNryLLgD0iM6RZGS3syAmHt/xUO7fSxVTjvmGmmbQQbdvTj6kfeAQAs2DchCh98fYscomVFjMVurg1
IET4QTnQor/5oJ4BE5Z7aLW1HLK5Lkfzchzvwz3c7AJgF85TGKrxw5Yk4/9jpaW+nviqZY1buzzpY5aE3tsrvrZ/DgTS1cu939GLt5i4EvC4s/ex+pp9VyR7m9A6z0xf8AYDj9/Dyyy/H2WefnXGb6dPtXxxz5sxBNBrFJ598gn322Qetra1ob283bSO+b21tlf/abSN+boff74ffz1N1xULXdWiahmjcviUDSC04Z4aZkNJTE/Bi4f7p/xaOVqr8nhQxBxg1YGRo3Pi1w3Le9vh9mk3upBXRxbxp14B0eQ+b3JDTbY+vq8B7wV5s6w5ir5YaWXMm3M0zPzUJ27qC+ObR0/BeeyJa2WuJZPy/B97EYCSGI6ePwU1nzcYJy57Fpl0DeOTNbbYHkkKMNtf4UeFzo77Si66BCLZ1DeLFj3fK7dq6g7ZVcIKd/SFT7lg4v/nGhJprAti8a9AkkoXDPL4ugG3dQfzvy5vwjblToGkaQikOs32GeXd/wpWeUF+R8jjEaOwBm1o5IaIrR0E1rOMd5qamJsyaNSvjl5pJVlmzZg1cLheamxNvvrlz5+Jf//oXIhHjdMPKlSuxzz77oKGhQW6zatUq0+2sXLkSc+fOLdEjJCo/ffhtHP1fT2NnX0jW2dg5zDV+OsyEELKnISIZT7zdjo07BzChvgLfX7hPTteVGeKuwWT/r4gDJD4/JjZUYtmXD8Z+42tRnfyMUQVzKBrDy58kWjP+60sHweN24atzJgNIH8vosGSNhcv8zHudpraK9p7MC/9EFnpstR+HTa6Xl+e7nkDkm9uU+xPi+ZyjpiHgdWF9W69c/Getd03nMGfqKhfucTiWWl03YDlocTKOF8y5snr1aixfvhxr167Fxx9/jLvuuguXXXYZzjrrLCmGv/rVr8Ln8+Gb3/wm3n77bdx777244YYbsHjxYnk73/ve9/D4449j2bJlWL9+PX7605/i1VdfxSWXXDJSD22P4un3OrC1axCr1ncgEs8UyaDDTAghexqqQNS0RJwm1zMBIkO8rTuIgUhMTrCzGjCAIfzUSEbPYFTe76SGSgDA6bMnwuvWsHZzF9Zt7U65HTHlTyw6HF+f2Ie/v7HFtF02wSx+Pq4ugF+fcTD8HheqfG7TsJJcEO6vKpjFPIPpTVUy/3zfK4loqZFhTnzGGrVyZrfYOPhIFb7qABprdIUO8wjg9/txzz334LjjjsP++++Pa6+9FpdddpmpY7murg5PPvkkNmzYgNmzZ+Pyyy/H0qVLccEFF8ht5s2bh7vvvht/+tOfcPDBB+OBBx7AQw89hAMOYJ3QcCCq5F7esEse2XpsIxnmP3AVvrJ5KRNCCEmDWh930XEzcOT0xpyvO77OcJiFI5qu/1cuLFcc5p5gwhGu9nvkCPWx1X6cfMA4AKkNE4DakpG4b5GTfr+9D4Dh3Lb3ZF5rJX7eUuvHjKZqPPbdo/HAxfPydmaFw93eneowN9X4ZRzmo87E/qUu+rOPZFgXUKr4PC75OK05ZmaYR4DDDjsML774YtbtDjroIPz73//OuM0ZZ5yBM844o1i7RvJAZLRe2rBTvoF8GVoyBOUyppcQQkh69mquwZTGSkyor8BlC/bO67rj6o2WClXg2TVsWJuYAKAn2R5RazFsPnfweDyydhte27g75XZEJEN0mI+z5HuP36cZT77TnrPDLKoPZzbXZNo8LUKwC4c5Htexs9+IewgXXURRrPWu6Voyso2Pr/R50D0YSZn2J643GmrlykYwk/IgljxHtnnXoKy/8dgUsFv/YFWMgtM5hBBChkaFz41nvj8fug7p8uaKcJi3dQ9K57gmjcBTIxnxuA6XS5P9zNaWJlGzuGnXgFy0DiREpahoE5GMViVCUeP34MR9E4K5LVuGORntaEnTT50rwukWznf3YERWuDZW+6Tb3Jt008XE3VSH2ZxF7pNZZPvP4iqfG92DEbnQUjCaHGaexyaOQl0FLLoyvZ7sDjMX/RFCyJ6Bpml5i2VAcZi7ghkjBABQrXzG9CVdUcNhNl9nYkMFXFpC/O3oS61r83lcMmctMswAMGf6GPl9Rx6RjKGgOsy6rsuGjLoKL/weo3pPOMwikpFaK2fvMKd7Piv9Ytrf6HWYKZiJo1AFs8DryqGH2UZUE0IIIQKxQG4wEpMdztUBe4Hn97hlHFC40SLDbP388XvcckHhpl398nI5tKTaL11n1WE+cnqjXITX3ptbJCNT9VwuqF3QvaFoysRAsT6oL5xw1qXDnKNgrk7jFFclzwJbF/3RYSakQGwFsydzD7PXrdlOAySEEEIEAa8bY5K1dB92JBa1pcvcAoaYFm60yPfatXJMaUy0ZmzcOSAvkwv+FFdYbbWYO6NRRiy6BiK246YF7Ta3VQgVPrd0yNu7g8rEwMTzIj5bdT0hmiPCYU6Z9GeNZGRxmH3CYWZLBiFFwU4we+wcZiXDzDgGIYSQXBgnWyp6AWR2NoWY7rU6zIH0gvkTRTB3WirlgIRw/O6Je+Gco6Ziv3G1qK3wyFxwulhGJBaXC/OG6jCrt9HeE1Ic5sRlAa/hrPcGo9Jh9guHOSmcU2rlsiz6E9nmAcvkRPYwE1IgQjCrR/B2LRnqHyx2MBNCCMkFkRn+IFnrli6SAaR2MYsFfNY5AAAweUxy4d9OI5IhGzIsC/UWf3pv/OSz+0PTNGiaZtuNrLKjLwRdTyyAH1OZfQx4NtQcs8hcj1Xq+owccyRthjmUMrgks/Clw0xIkRHjsOcq3Zp2PczqHzk6zIQQQnJBNGXIDHMGZ9PaxZyuVg5QIhm7UiMZ2cZXi1hGumo5Gceo8Re02DHl/mqN+7NmmAGYFv6Fkw0avpRIRroe5jQtGekc5jAdZkLyRtd1iETG3BmGYLab9Od2aXIRAR1mQgghuTBOaakA0gs8wOy0AkhbKwcAk8ckBPMmJZIhhn9MbKhI2V6lpS6bYDZ3MA8VsfCvrTsoWzLUCYpi4Z/qMFtr5Qati/6STrG1wUqQ1mEO0WEmJG/U/PI8RTD7baYwAcYfLbspTYQQQogV6yjpapux2MbPrIv+7GvlAMNh3tkfRl8oilA0hjeTo7IPndyQcZ9aku5uOsHcIRsyhrbgT95fXe4OsxhcIoyrCsVh1nXjMztbrZwwuKyDS0ZTS4bz95DsMcSUN19LXQBLP7MfBiMx29NfQOJNvb2bkQxCCCG5Md7iMFdncJirA2kW/dk4zDUBL8ZU+bCrP4yNO/sRisYRjsYxpsqHqUkxnY7WOmMRnh1GB3NxHGZVoO9Qqu8EQjD3BKMpDrM/+Xkb14FITIcv2WIlIxlphK/sYVYGl+i6bmSYR0EPMwUzcQyqw+xxaTj36GkZtxenjSiYCSGE5EKKw5xx0V+yk9hSK5fOxJk8pjIpmAewLZmRPmxyg+3obZXmLIv+itXBLGiVEw+D2NWfWPRndpiNSEbqaGzjjG4wGoPP40I0Fpc1c2lbMmwc5mAkDuGTjQaHmeeyiWNQBbM7h4UN4rQYM8yEEEJyoaU2AFW/ZhJqKYv+gulbMgBzF/Prm3YDAGZPyR
[... remaining base64-encoded PNG data omitted; the embedded output image is the "DDPG on Pendulum v1" training curve (Episodes vs. Returns) drawn by the source cell below ...]
136 | },
137 | "metadata": {},
138 | "output_type": "display_data"
139 | }
140 | ],
141 | "source": [
142 | "episodes_list = list(range(len(result)))\n",
143 | "plt.figure(figsize=(8,6))\n",
144 | "plt.plot(episodes_list, result)\n",
145 | "plt.xlabel(\"Episodes\")\n",
146 | "plt.ylabel(\"Returns\")\n",
147 | "plt.title(\"DDPG on {}\".format(\"Pendulum v1\"))\n",
148 | "plt.show()"
149 | ],
150 | "metadata": {
151 | "collapsed": false
152 | }
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": null,
157 | "outputs": [],
158 | "source": [],
159 | "metadata": {
160 | "collapsed": false
161 | }
162 | }
163 | ],
164 | "metadata": {
165 | "kernelspec": {
166 | "display_name": "Python 3",
167 | "language": "python",
168 | "name": "python3"
169 | },
170 | "language_info": {
171 | "codemirror_mode": {
172 | "name": "ipython",
173 | "version": 2
174 | },
175 | "file_extension": ".py",
176 | "mimetype": "text/x-python",
177 | "name": "python",
178 | "nbconvert_exporter": "python",
179 | "pygments_lexer": "ipython2",
180 | "version": "2.7.6"
181 | }
182 | },
183 | "nbformat": 4,
184 | "nbformat_minor": 0
185 | }
186 |
--------------------------------------------------------------------------------
/life/base/__pycache__/q_learning.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/base/__pycache__/q_learning.cpython-37.pyc
--------------------------------------------------------------------------------
/life/base/__pycache__/sarsa.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/base/__pycache__/sarsa.cpython-37.pyc
--------------------------------------------------------------------------------
/life/base/__pycache__/trainer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/base/__pycache__/trainer.cpython-37.pyc
--------------------------------------------------------------------------------
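Before the per-file source dumps that follow, here is a minimal usage sketch of how the tabular pieces defined below (life/base/q_learning.py, life/base/trainer.py, life/envs/cliffwalking.py) fit together. The import paths are assumed from the directory layout and the hyperparameter values are purely illustrative; only the class and function signatures are taken from the sources shown below.

```python
# Hypothetical end-to-end run: tabular Q-Learning on CliffWalkingEnv.
# Assumes the package is importable as `life`; adjust the paths for the published `rllife` build if needed.
from life.base.q_learning import QLearning
from life.base.trainer import train_qlearning
from life.envs.cliffwalking import CliffWalkingEnv

ncol, nrow = 12, 4
env = CliffWalkingEnv(ncol=ncol, nrow=nrow)          # 12x4 grid; state index = y * ncol + x

agent = QLearning(n_state=ncol * nrow,               # one Q-table row per grid cell
                  epsilon=0.1,                       # probability of a random action (illustrative)
                  alpha=0.1,                         # learning rate (illustrative)
                  gamma=0.9,                         # discount factor (illustrative)
                  n_action=4)

# Runs 500 episodes and returns the undiscounted return of each episode.
return_list = train_qlearning(env, agent, num_episodes=500)

# After training, best_action marks every greedy action for a given state with a 1.
print(agent.best_action(env.reset()))
```

The same pattern (build the environment, build the agent, hand both to the matching trainer function) repeats for the Sarsa and MultiSarsa classes below via train_sarsa and train_multi_sarsa.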
/life/base/q_learning.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | 4 | 5 | class QLearning: 6 | """Q-Learning算法""" 7 | 8 | def __init__(self, n_state, epsilon, alpha, gamma, n_action=4): 9 | self.Q_table = np.zeros((n_state, n_action)) 10 | self.n_action = n_action 11 | self.epsilon = epsilon 12 | self.alpha = alpha 13 | self.gamma = gamma 14 | 15 | def take_action(self, state): 16 | """根据策略Q选取在state下的最有动作action""" 17 | if np.random.rand() < self.epsilon: 18 | action = np.random.randint(self.n_action) 19 | else: 20 | action = np.argmax(self.Q_table[state]) 21 | return action 22 | 23 | def best_action(self, state): 24 | """训练完成后选择最优动作""" 25 | Q_max = np.max(self.Q_table[state]) 26 | a = [0 for _ in range(self.n_action)] 27 | for i in range(self.n_action): 28 | if self.Q_table[state, i] == Q_max: 29 | a[i] = 1 30 | return a 31 | 32 | def update(self, s0, a0, r, s1): 33 | """更新Q表格""" 34 | td_error = r + self.gamma * self.Q_table[s1].max() - self.Q_table[s0, a0] 35 | self.Q_table[s0, a0] += self.alpha * td_error 36 | 37 | 38 | class DynaQ: 39 | def __init__(self, n_state, epsilon, alpha, gamma, n_planning, n_action=4): 40 | self.Q_table = np.zeros((n_state, n_action)) 41 | self.n_action = n_action 42 | self.alpha = alpha 43 | self.gamma = gamma 44 | self.epsilon = epsilon 45 | self.n_planning = n_planning # 每执行一次Q-learning,执行n_planning次Q-planning 46 | self.model = dict() # 每次在真实环境中收集到新数据,就加入到字典中(如果之前不存在的话) 47 | 48 | def take_action(self, state): 49 | """根据状态选取下一步的动作""" 50 | if np.random.rand() < self.epsilon: 51 | action = np.random.randint(self.n_action) 52 | else: 53 | action = np.argmax(self.Q_table[state]) 54 | return action 55 | 56 | def q_learning(self, s0, a0, r, s1): 57 | """使用Q-learning的方法更新Q表格""" 58 | td_error = r + self.gamma * self.Q_table[s1].max() - self.Q_table[s0, a0] 59 | self.Q_table[s0, a0] += self.alpha * td_error 60 | 61 | def update(self, s0, a0, r, s1): 62 | """Dyna-Q算法的主要部分,更新Q表格 63 | 使用Q-learning更新一次,在使用Q-planning从历史数据中更新n_planning次""" 64 | self.q_learning(s0, a0, r, s1) 65 | self.model[(s0, a0)] = r, s1 # 将新数据加入到model中 66 | for _ in range(self.n_planning): # Q-planning循环 67 | (s, a), (r, s_) = random.choice(list(self.model.items())) # 随机选择之前的数据 68 | self.q_learning(s, a, r, s_) 69 | -------------------------------------------------------------------------------- /life/base/sarsa.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Sarsa: 5 | def __init__(self, n_state, epsilon, alpha, gamma, n_action=4): 6 | """Sarsa算法 7 | 8 | Arguments: 9 | ncol -- 环境列数 10 | nrow -- 环境行数 11 | epsilon -- 随机选择动作的概率 12 | alpha -- 学习率 13 | gamma -- 折扣因子 14 | 15 | Keyword Arguments: 16 | n_action -- 动作的个数 (default: {4}) 17 | """ 18 | self.Q_table = np.zeros((n_state, n_action)) 19 | self.n_action = n_action 20 | self.alpha = alpha 21 | self.epsilon = epsilon 22 | self.gamma = gamma 23 | 24 | def take_action(self, state): 25 | """根据state选择下一步的操作,具体实现为epsilon-贪心""" 26 | if np.random.rand() < self.epsilon: 27 | action = np.random.randint(self.n_action) 28 | else: 29 | action = np.argmax(self.Q_table[state]) 30 | return action 31 | 32 | def best_action(self, state): 33 | """用于打印策略""" 34 | Q_max = np.max(self.Q_table[state]) 35 | a = [0 for _ in range(self.n_action)] 36 | 37 | # 若两个动作的价值一样,都会被记录下来 38 | for i in range(self.n_action): 39 | if self.Q_table[state][i] == Q_max: 40 | a[i] = 1 41 | return a 42 | 43 | def update(self, s0, a0, r, 
s1, a1): 44 | """"更新Q表格""" 45 | td_error = r + self.gamma * self.Q_table[s1, a1] - self.Q_table[s0, a0] # 时序差分误差 46 | self.Q_table[s0, a0] += self.alpha * td_error 47 | 48 | 49 | class MultiSarsa: 50 | """n步Sarsa算法""" 51 | 52 | def __init__(self, n, n_state, epsilon, alpha, gamma, n_action=4): 53 | self.Q_table = np.zeros((n_state, n_action)) 54 | self.n_action = n_action 55 | self.alpha = alpha 56 | self.gamma = gamma 57 | self.epsilon = epsilon 58 | self.n = n # 采用n步Sarsa算法 59 | self.state_list = [] # 保存之前的状态 60 | self.action_list = [] # 保存之前的动作 61 | self.reward_list = [] # 保存之前的奖励 62 | 63 | def take_action(self, state): 64 | """根据状态图选取一个动作""" 65 | if np.random.rand() < self.epsilon: 66 | action = np.random.randint(self.n_action) 67 | else: 68 | action = np.argmax(self.Q_table[state]) 69 | return action 70 | 71 | def best_action(self, state): 72 | """用于输出state下的最优动作(训练完成后)""" 73 | Q_max = np.max(self.Q_table[state]) 74 | a = [0 for _ in range(self.n_action)] 75 | for i in range(self.n_action): 76 | if self.Q_table[state, i] == Q_max: 77 | a[i] = 1 78 | return a 79 | 80 | def update(self, s0, a0, r, s1, a1, done): 81 | """基于Sarsa算法,更新Q表格""" 82 | self.state_list.append(s0) 83 | self.action_list.append(a0) 84 | self.reward_list.append(r) 85 | 86 | if len(self.state_list) == self.n: # 若保存的数据可以进行n步更新 87 | G = self.Q_table[s1, a1] # 得到Q(s_{t+n},a_{t+n}) 88 | for i in reversed(range(self.n)): # 不断向前计算每一步的回报,并折扣累加 89 | G = self.gamma * G + self.reward_list[i] 90 | if done and i > 0: # 虽然最后几步没有到达n步,但是到达了终止状态,也将其更新 91 | s = self.state_list[i] 92 | a = self.action_list[i] 93 | self.Q_table[s, a] += self.alpha * (G - self.Q_table[s, a]) 94 | s = self.state_list.pop(0) # s_t 95 | a = self.action_list.pop(0) # a_t 96 | self.reward_list.pop(0) # r_t 97 | # n步Sarsa的主要更新步骤 98 | self.Q_table[s, a] += self.alpha * (G - self.Q_table[s, a]) 99 | if done: 100 | # 到达终止状态,即将开始下一个序列,将列表清空 101 | self.state_list.clear() 102 | self.action_list.clear() 103 | self.reward_list.clear() 104 | -------------------------------------------------------------------------------- /life/base/trainer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tqdm import tqdm 3 | 4 | 5 | def train_sarsa(env, agent, num_episodes=500, return_agent=False): 6 | """ 7 | 8 | :param env: 9 | :param agent: 10 | :param num_episodes: 11 | :param return_agent: 12 | :return: 13 | """ 14 | return_list = [] # 记录每一条序列的回报 15 | for i in range(10): # 显示10个进度条 16 | # tqdm的进度条功能 17 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 18 | for i_episode in range(int(num_episodes / 10)): # 每个进度条的序列数 19 | episode_return = 0 20 | state = env.reset() 21 | action = agent.take_action(state) 22 | done = False 23 | while not done: 24 | next_state, reward, done = env.step(action) 25 | next_action = agent.take_action(next_state) 26 | episode_return += reward # 这里回报的计算不进行折扣因子衰减 27 | agent.update(state, action, reward, next_state, next_action) 28 | state = next_state 29 | action = next_action 30 | return_list.append(episode_return) 31 | if (i_episode + 1) % 10 == 0: # 每10条序列打印一下这10条序列的平均回报 32 | pbar.set_postfix({ 33 | 'episode': 34 | '%d' % (num_episodes / 10 * i + i_episode + 1), 35 | 'return': 36 | '%.3f' % np.mean(return_list[-10:]) 37 | }) 38 | pbar.update(1) 39 | if return_agent: 40 | return return_list, agent 41 | return return_list 42 | 43 | 44 | def train_multi_sarsa(env, agent, num_episodes=500, return_agent=False): 45 | return_list = [] # 记录每一条序列的回报 46 | for i in range(10): 
# 显示10个进度条 47 | # tqdm的进度条功能 48 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 49 | for i_episode in range(int(num_episodes / 10)): # 每个进度条的序列数 50 | episode_return = 0 51 | state = env.reset() 52 | action = agent.take_action(state) 53 | done = False 54 | while not done: 55 | next_state, reward, done = env.step(action) 56 | next_action = agent.take_action(next_state) 57 | episode_return += reward # 这里回报的计算不进行折扣因子衰减 58 | agent.update(state, action, reward, next_state, next_action, 59 | done) 60 | state = next_state 61 | action = next_action 62 | return_list.append(episode_return) 63 | if (i_episode + 1) % 10 == 0: # 每10条序列打印一下这10条序列的平均回报 64 | pbar.set_postfix({ 65 | 'episode': 66 | '%d' % (num_episodes / 10 * i + i_episode + 1), 67 | 'return': 68 | '%.3f' % np.mean(return_list[-10:]) 69 | }) 70 | pbar.update(1) 71 | if return_agent: 72 | return return_list, agent 73 | return return_list 74 | 75 | 76 | def train_qlearning(env, agent, num_episodes=500, return_agent=False): 77 | """""" 78 | return_list = [] 79 | for i in range(10): # 显示10个进度条 80 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 81 | for i_episode in range(int(num_episodes / 10)): # 每个进度条的序列数 82 | episode_return = 0 # 初始化一个回合的回报 83 | # 初始化状态 动作 done 84 | state = env.reset() # 初始状态 85 | done = False 86 | 87 | while not done: 88 | action = agent.take_action(state) 89 | next_state, reward, done = env.step(action) # 智能体与环境交互 90 | episode_return += reward 91 | agent.update(state, action, reward, next_state) 92 | state = next_state 93 | return_list.append(episode_return) 94 | if (i_episode + 1) % 10 == 0: 95 | pbar.set_postfix({"episode": "%d" % (num_episodes / 10 * i + i_episode + 1), 96 | "return": "%.3f" % np.mean(return_list[-10:])}) 97 | pbar.update(1) 98 | if return_agent: 99 | return return_list, agent 100 | return return_list 101 | -------------------------------------------------------------------------------- /life/dqn/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------- /life/dqn/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/dqn/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /life/dqn/__pycache__/dqn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/dqn/__pycache__/dqn.cpython-37.pyc -------------------------------------------------------------------------------- /life/dqn/__pycache__/dqn_improved.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/dqn/__pycache__/dqn_improved.cpython-37.pyc -------------------------------------------------------------------------------- /life/dqn/__pycache__/trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/dqn/__pycache__/trainer.cpython-37.pyc -------------------------------------------------------------------------------- /life/dqn/dqn.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import numpy as np 4 | 5 | 6 | class Qnet(torch.nn.Module): 7 | ''' 只有一层隐藏层的Q网络 ''' 8 | 9 | def __init__(self, state_dim, hidden_dim, action_dim): 10 | super(Qnet, self).__init__() 11 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 12 | self.fc2 = torch.nn.Linear(hidden_dim, action_dim) 13 | 14 | def forward(self, x): 15 | x = F.relu(self.fc1(x)) # 隐藏层使用ReLU激活函数 16 | return self.fc2(x) 17 | 18 | 19 | class DQN: 20 | ''' DQN算法 ''' 21 | 22 | def __init__(self, state_dim, hidden_dim, action_dim, learning_rate, gamma, 23 | epsilon, target_update, device, q_net=Qnet): 24 | """ 25 | 26 | :param state_dim: 27 | :param hidden_dim: 28 | :param action_dim: 29 | :param learning_rate: 30 | :param gamma: 31 | :param epsilon: 32 | :param target_update: 33 | :param device:torch的device 34 | :param q_net: 计算q值的网络,默认为2层的全连接神经网络,也可以自己定义网络 35 | """ 36 | self.action_dim = action_dim 37 | self.q_net = q_net(state_dim, hidden_dim, 38 | self.action_dim).to(device) # Q网络 39 | # 目标网络 40 | self.target_q_net = q_net(state_dim, hidden_dim, 41 | self.action_dim).to(device) 42 | # 使用Adam优化器 43 | self.optimizer = torch.optim.Adam(self.q_net.parameters(), 44 | lr=learning_rate) 45 | self.gamma = gamma # 折扣因子 46 | self.epsilon = epsilon # epsilon-贪婪策略 47 | self.target_update = target_update # 目标网络更新频率 48 | self.count = 0 # 计数器,记录更新次数 49 | self.device = device 50 | 51 | def take_action(self, state): # epsilon-贪婪策略采取动作 52 | if np.random.random() < self.epsilon: 53 | action = np.random.randint(self.action_dim) 54 | else: 55 | state = torch.tensor([state], dtype=torch.float).to(self.device) 56 | action = self.q_net(state).argmax().item() 57 | return action 58 | 59 | def max_q_value(self, state): 60 | state = torch.tensor([state], dtype=torch.float).to(self.device) 61 | return self.q_net(state).max().item() 62 | 63 | def update(self, transition_dict): 64 | states = torch.tensor(transition_dict['states'], 65 | dtype=torch.float).to(self.device) 66 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 67 | self.device) 68 | rewards = torch.tensor(transition_dict['rewards'], 69 | dtype=torch.float).view(-1, 1).to(self.device) 70 | next_states = torch.tensor(transition_dict['next_states'], 71 | dtype=torch.float).to(self.device) 72 | dones = torch.tensor(transition_dict['dones'], 73 | dtype=torch.float).view(-1, 1).to(self.device) 74 | 75 | q_values = self.q_net(states).gather(1, actions) # Q值 76 | # 下个状态的最大Q值 77 | max_next_q_values = self.target_q_net(next_states).max(1)[0].view( 78 | -1, 1) 79 | q_targets = rewards + self.gamma * max_next_q_values * (1 - dones 80 | ) # TD误差目标 81 | dqn_loss = torch.mean(F.mse_loss(q_values, q_targets)) # 均方误差损失函数 82 | self.optimizer.zero_grad() # PyTorch中默认梯度会累积,这里需要显式将梯度置为0 83 | dqn_loss.backward() # 反向传播更新参数 84 | self.optimizer.step() 85 | 86 | if self.count % self.target_update == 0: 87 | self.target_q_net.load_state_dict( 88 | self.q_net.state_dict()) # 更新目标网络 89 | self.count += 1 90 | -------------------------------------------------------------------------------- /life/dqn/dqn_improved.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn.functional as F 4 | from .dqn import Qnet 5 | 6 | 7 | class DoubleDQN: 8 | """Double DQN 算法""" 9 | 10 | def __init__(self, 11 | state_dim, 12 | hidden_dim, 13 | action_dim, 14 | learning_rate, 15 | gamma, 16 | 
epsilon, 17 | target_update, 18 | device, 19 | q_net=Qnet): 20 | self.action_dim = action_dim 21 | self.q_net = q_net(state_dim, hidden_dim, self.action_dim).to(device) 22 | self.target_q_net = q_net(state_dim, hidden_dim, 23 | self.action_dim).to(device) 24 | self.optimizer = torch.optim.Adam(self.q_net.parameters(), 25 | lr=learning_rate) 26 | self.gamma = gamma 27 | self.epsilon = epsilon 28 | self.target_update = target_update 29 | self.count = 0 30 | self.device = device 31 | 32 | def take_action(self, state): 33 | if np.random.random() < self.epsilon: 34 | action = np.random.randint(self.action_dim) 35 | else: 36 | state = torch.tensor([state], dtype=torch.float).to(self.device) 37 | action = self.q_net(state).argmax().item() 38 | return action 39 | 40 | def max_q_value(self, state): 41 | """在一堆Q值中寻找最大的Q""" 42 | state = torch.tensor([state], dtype=torch.float).to(self.device) 43 | return self.q_net(state).max().item() 44 | 45 | def update(self, transition_dict): 46 | states = torch.tensor(transition_dict['states'], 47 | dtype=torch.float).to(self.device) 48 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 49 | self.device) 50 | rewards = torch.tensor(transition_dict['rewards'], 51 | dtype=torch.float).view(-1, 1).to(self.device) 52 | next_states = torch.tensor(transition_dict['next_states'], 53 | dtype=torch.float).to(self.device) 54 | dones = torch.tensor(transition_dict['dones'], 55 | dtype=torch.float).view(-1, 1).to(self.device) 56 | 57 | q_values = self.q_net(states).gather(1, actions) # q值的计算都是一样的 58 | max_action = self.q_net(next_states).max(1)[1].view(-1, 1) # 动作选择有q-net负责 59 | max_next_q_values = self.target_q_net(next_states).gather(1, max_action) # Q值计算由t-net负责 60 | 61 | q_targets = rewards + self.gamma * max_next_q_values * (1 - dones) # q_target的计算也是一样的 62 | 63 | # 计算loss 64 | loss = torch.mean(F.mse_loss(q_values, q_targets)) 65 | self.optimizer.zero_grad() 66 | loss.backward() 67 | self.optimizer.step() 68 | 69 | if self.count % self.target_update == 0: 70 | self.target_q_net.load_state_dict( 71 | self.q_net.state_dict()) # 更新目标网络 72 | self.count += 1 73 | 74 | 75 | class VAnet(torch.nn.Module): 76 | """只有一层隐藏层的A网络和V网络""" 77 | 78 | def __init__(self, state_dim, hidden_dim, action_dim): 79 | super(VAnet, self).__init__() 80 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) # 共享网络部分 81 | self.fc_A = torch.nn.Linear(hidden_dim, action_dim) 82 | self.fc_V = torch.nn.Linear(hidden_dim, 1) 83 | 84 | def forward(self, x): 85 | A = self.fc_A(F.relu(self.fc1(x))) 86 | V = self.fc_V(F.relu(self.fc1(x))) 87 | Q = V + A - A.mean(1).view(-1, 1) # Q值由V值和A值计算得到 88 | return Q 89 | 90 | 91 | class DuelingDQN: 92 | """Dueling DQN算法""" 93 | def __init__(self, 94 | state_dim, 95 | hidden_dim, 96 | action_dim, 97 | learning_rate, 98 | gamma, 99 | epsilon, 100 | target_update, 101 | device): 102 | self.action_dim = action_dim 103 | self.q_net = VAnet(state_dim, hidden_dim, 104 | self.action_dim).to(device) 105 | self.target_q_net = VAnet(state_dim, hidden_dim, 106 | self.action_dim).to(device) 107 | self.optimizer = torch.optim.Adam(self.q_net.parameters(), 108 | lr=learning_rate) 109 | self.gamma = gamma 110 | self.epsilon = epsilon 111 | self.target_update = target_update 112 | self.count = 0 113 | self.device = device 114 | 115 | def take_action(self, state): 116 | if np.random.random() < self.epsilon: 117 | action = np.random.randint(self.action_dim) 118 | else: 119 | state = torch.tensor([state], dtype=torch.float).to(self.device) 120 | action = 
self.q_net(state).argmax().item() 121 | return action 122 | 123 | def max_q_value(self, state): 124 | state = torch.tensor([state], dtype=torch.float).to(self.device) 125 | return self.q_net(state).max().item() 126 | 127 | def update(self, transition_dict): 128 | states = torch.tensor(transition_dict['states'], 129 | dtype=torch.float).to(self.device) 130 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 131 | self.device) 132 | rewards = torch.tensor(transition_dict['rewards'], 133 | dtype=torch.float).view(-1, 1).to(self.device) 134 | next_states = torch.tensor(transition_dict['next_states'], 135 | dtype=torch.float).to(self.device) 136 | dones = torch.tensor(transition_dict['dones'], 137 | dtype=torch.float).view(-1, 1).to(self.device) 138 | 139 | q_values = self.q_net(states).gather(1, actions) 140 | 141 | max_next_q_values = self.target_q_net(next_states).max(1)[0].view(-1, 1) 142 | q_targets = rewards + self.gamma * max_next_q_values * (1 - dones) 143 | dqn_loss = torch.mean(F.mse_loss(q_values, q_targets)) 144 | self.optimizer.zero_grad() 145 | dqn_loss.backward() 146 | self.optimizer.step() 147 | 148 | if self.count % self.target_update == 0: 149 | self.target_q_net.load_state_dict(self.q_net.state_dict()) 150 | self.count += 1 151 | -------------------------------------------------------------------------------- /life/dqn/trainer.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | from life.utils.cont2disp import dis2con 3 | import numpy as np 4 | 5 | 6 | def train_dqn(agent, env, replay_buffer, minimal_size, batch_size, num_episodes=500, 7 | conti_action=False, return_agent=False): 8 | """ 9 | 训练各种DQN 10 | :param agent: 11 | :param env: 12 | :param num_episodes: 13 | :param replay_buffer: 14 | :param minimal_size: replay_buffer只有超过了minimal_size,才开始训练 15 | :param batch_size: 16 | :param conti_action: 是否用于连续动作 17 | :param return_agent: 是否返回智能体 18 | :return: 19 | """ 20 | return_list = [] 21 | for i in range(10): 22 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 23 | for i_episode in range(int(num_episodes / 10)): 24 | episode_return = 0 25 | state = env.reset() 26 | done = False 27 | while not done: 28 | action = agent.take_action(state) 29 | next_state, reward, done, _ = env.step(action) 30 | replay_buffer.add(state, action, reward, next_state, done) 31 | state = next_state 32 | episode_return += reward 33 | # 当buffer数据的数量超过一定值后,才进行Q网络训练 34 | if replay_buffer.size() > minimal_size: 35 | b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size) 36 | transition_dict = { 37 | 'states': b_s, 38 | 'actions': b_a, 39 | 'next_states': b_ns, 40 | 'rewards': b_r, 41 | 'dones': b_d 42 | } 43 | agent.update(transition_dict) 44 | return_list.append(episode_return) 45 | if (i_episode + 1) % 10 == 0: 46 | pbar.set_postfix({ 47 | 'episode': 48 | '%d' % (num_episodes / 10 * i + i_episode + 1), 49 | 'return': 50 | '%.3f' % np.mean(return_list[-10:]) 51 | }) 52 | pbar.update(1) 53 | if return_agent: 54 | return return_list, agent 55 | return return_list 56 | 57 | 58 | def train(agent, env, replay_buffer, minimal_size, 59 | batch_size, num_episodes=500, con_act=False,return_agent=False): 60 | return_list = [] 61 | max_q_value_list = [] 62 | max_q_value = 0 63 | for i in range(10): 64 | with tqdm(total=int(num_episodes / 10), 65 | desc='Iteration %d' % i) as pbar: 66 | for i_episode in range(int(num_episodes / 10)): 67 | episode_return = 0 68 | state = env.reset() 69 | done = False 70 | 
while not done: 71 | action = agent.take_action(state) 72 | max_q_value = agent.max_q_value( 73 | state) * 0.005 + max_q_value * 0.995 # 平滑处理 74 | max_q_value_list.append(max_q_value) # 保存每个状态的最大Q值 75 | if con_act: 76 | action_continuous = dis2con(action, env, 77 | agent.action_dim) 78 | next_state, reward, done, _ = env.step([action_continuous]) 79 | else: 80 | next_state, reward, done, _ = env.step(action) # 用于离散动作的DQN不需加[] 81 | replay_buffer.add(state, action, reward, next_state, done) 82 | state = next_state 83 | episode_return += reward 84 | if replay_buffer.size() > minimal_size: 85 | b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample( 86 | batch_size) 87 | transition_dict = { 88 | 'states': b_s, 89 | 'actions': b_a, 90 | 'next_states': b_ns, 91 | 'rewards': b_r, 92 | 'dones': b_d 93 | } 94 | agent.update(transition_dict) 95 | return_list.append(episode_return) 96 | if (i_episode + 1) % 10 == 0: 97 | pbar.set_postfix({ 98 | 'episode': 99 | '%d' % (num_episodes / 10 * i + i_episode + 1), 100 | 'return': 101 | '%.3f' % np.mean(return_list[-10:]) 102 | }) 103 | pbar.update(1) 104 | if return_agent: 105 | return return_list, agent 106 | return return_list 107 | -------------------------------------------------------------------------------- /life/envs/__pycache__/cliffwalking.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/envs/__pycache__/cliffwalking.cpython-37.pyc -------------------------------------------------------------------------------- /life/envs/__pycache__/con_env_demo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/envs/__pycache__/con_env_demo.cpython-37.pyc -------------------------------------------------------------------------------- /life/envs/__pycache__/dis_env_demo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/envs/__pycache__/dis_env_demo.cpython-37.pyc -------------------------------------------------------------------------------- /life/envs/cliffwalking.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | 4 | class CliffWalkingEnv: 5 | def __init__(self, ncol, nrow): 6 | self.nrow = nrow 7 | self.ncol = ncol 8 | self.x = 0 # 记录当前智能体位置的横坐标 9 | self.y = self.nrow - 1 # 记录当前智能体位置的纵坐标 10 | 11 | def step(self, action): # 外部调用这个函数来改变当前位置 12 | # 4种动作, change[0]:上, change[1]:下, change[2]:左, change[3]:右。坐标系原点(0,0) 13 | # 定义在左上角 14 | change = [[0, -1], [0, 1], [-1, 0], [1, 0]] 15 | self.x = min(self.ncol - 1, max(0, self.x + change[action][0])) 16 | self.y = min(self.nrow - 1, max(0, self.y + change[action][1])) 17 | next_state = self.y * self.ncol + self.x 18 | reward = -1 19 | done = False 20 | if self.y == self.nrow - 1 and self.x > 0: # 下一个位置在悬崖或者目标 21 | done = True 22 | if self.x != self.ncol - 1: 23 | reward = -100 24 | return next_state, reward, done 25 | 26 | def reset(self): # 回归初始状态,坐标轴原点在左上角 27 | self.x = 0 28 | self.y = self.nrow - 1 29 | return self.y * self.ncol + self.x 30 | -------------------------------------------------------------------------------- /life/envs/con_env_demo.py: -------------------------------------------------------------------------------- 1 | import gym 2 | 3 
| 4 | def make(): 5 | return gym.make("Pendulum-v1") -------------------------------------------------------------------------------- /life/envs/dis_env_demo.py: -------------------------------------------------------------------------------- 1 | import gym 2 | 3 | 4 | def make(): 5 | return gym.make('CartPole-v0') 6 | -------------------------------------------------------------------------------- /life/imitation/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------- /life/imitation/bc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from ..policy.ppo import PolicyNet 3 | 4 | 5 | class BehaviorClone: 6 | def __init__(self, state_dim, hidden_dim, action_dim, lr, device, policy_net=PolicyNet): 7 | self.policy = policy_net(state_dim, hidden_dim, action_dim).to(device) 8 | self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr) 9 | self.device = device 10 | 11 | def learn(self, states, actions): 12 | """policy net 学习,参数更新""" 13 | states = torch.tensor(states, dtype=torch.float).to(self.device) 14 | actions = torch.tensor(actions, dtype=torch.int64).view(-1, 1).to(self.device) 15 | log_probs = torch.log(self.policy(states).gather(1, actions)) # 注意这里的损失函数计算方式 16 | bc_loss = torch.mean(-log_probs) # 最大似然估计 17 | 18 | self.optimizer.zero_grad() 19 | bc_loss.backward() 20 | self.optimizer.step() 21 | 22 | def take_action(self, state): 23 | state = torch.tensor([state], dtype=torch.float).to(self.device) 24 | probs = self.policy(state) 25 | action_dist = torch.distributions.Categorical(probs) 26 | action = action_dist.sample() 27 | return action.item() 28 | -------------------------------------------------------------------------------- /life/imitation/gail.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class Discriminator(nn.Module): 7 | """判别器模型""" 8 | 9 | def __init__(self, state_dim, hidden_dim, action_dim) -> None: 10 | super().__init__() 11 | self.fc1 = nn.Linear(state_dim + action_dim, hidden_dim) 12 | self.fc2 = nn.Linear(hidden_dim, 1) 13 | 14 | def forward(self, x, a): 15 | cat = torch.cat([x, a], dim=1) 16 | x = F.relu(self.fc1(cat)) 17 | return torch.sigmoid(self.fc2(x)) # 输出的是一个概率标量 18 | 19 | 20 | class GAIL: 21 | def __init__(self, agent, state_dim, action_dim, hidden_dim, lr_d, device, discriminator=Discriminator): 22 | self.dicriminator = discriminator(state_dim, hidden_dim, action_dim).to(device) 23 | self.dicriminator_optimizer = torch.optim.Adam(self.dicriminator.parameters(), lr=lr_d) 24 | self.agent = agent 25 | self.device = device 26 | 27 | def learn(self, expert_s, expert_a, agent_s, agent_a, next_s, dones): 28 | expert_states = torch.tensor(expert_s, dtype=torch.float).to(self.device) 29 | expert_actions = torch.tensor(expert_a).to(self.device) 30 | agent_states = torch.tensor(agent_s, dtype=torch.float).to(self.device) 31 | agent_actions = torch.tensor(agent_a).to(self.device) 32 | 33 | expert_actions = F.one_hot(expert_actions.to(torch.int64), num_classes=2).float() # 两个动作 34 | agent_actions = F.one_hot(agent_actions.to(torch.int64), num_classes=2).float() 35 | 36 | expert_prob = self.dicriminator(expert_states, expert_actions) # 前向传播,输出数据来自于专家的概率 37 | agent_prob = self.dicriminator(agent_states, agent_actions) 38 | # 计算判别器的损失 39 | discriminator_loss = 
nn.BCELoss()(agent_prob, torch.ones_like(agent_prob)) + \ 40 | nn.BCELoss()(expert_prob, torch.zeros_like(expert_prob)) 41 | # 优化更新 42 | self.dicriminator_optimizer.zero_grad() 43 | discriminator_loss.backward() 44 | self.dicriminator_optimizer.step() 45 | 46 | # 将判别器的输出转换为策略的奖励信号 47 | rewards = -torch.log(agent_prob).detach().cpu().numpy() 48 | transition_dict = { 49 | 'states': agent_s, 50 | 'actions': agent_a, 51 | 'rewards': rewards, # 只有rewards改变了,换成了 概率(被判别器识破的概率) 52 | 'next_states': next_s, 53 | 'dones': dones 54 | } 55 | self.agent.update(transition_dict) 56 | -------------------------------------------------------------------------------- /life/imitation/trainer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tqdm import tqdm 3 | 4 | 5 | def test_agent(agent, env, n_episode): 6 | """ 7 | 对智能体进行episode次测试,记录每个回合的reward,返回其平均值 8 | """ 9 | return_list = [] 10 | for episode in range(n_episode): 11 | episode_return = 0 12 | state = env.reset() 13 | done = False 14 | 15 | while not done: 16 | action = agent.take_action(state) 17 | next_state, reward, done, _ = env.step(action) 18 | state = next_state 19 | episode_return += reward 20 | return_list.append(episode_return) 21 | return np.mean(return_list) 22 | 23 | 24 | def train_bc(bc_agent, env, expert_s, expert_a, n_iterations, batch_size, return_agent=False): 25 | """训练bc算法的函数""" 26 | test_returns = [] 27 | 28 | with tqdm(total=n_iterations, desc="进度条") as pbar: 29 | for i in range(n_iterations): 30 | sample_indices = np.random.randint(0, expert_s.shape[0], size=batch_size) 31 | expert_s_sample_batch = expert_s[sample_indices] # 含有重复数据,如本例是从30条经验数据中采样64个 32 | expert_a_sample_batch = expert_a[sample_indices] 33 | 34 | bc_agent.learn(expert_s_sample_batch, expert_a_sample_batch) # 有监督的智能体学习 35 | 36 | current_return = test_agent(bc_agent, env, 5) 37 | test_returns.append(current_return) 38 | if (i + 1) % 10 == 0: 39 | pbar.set_postfix({"return": "%.3f" % np.mean(test_returns[-10:])}) 40 | pbar.update(1) 41 | if return_agent: 42 | return test_returns, bc_agent 43 | return test_returns 44 | 45 | 46 | def train_gail(agent, gail, env, expert_s, expert_a, n_episode=500, return_agent=False): 47 | """ 48 | gail算法的训练函数 49 | :param agent: 需要与环境交互的智能体,同时也是要传入gail算法类的智能体 50 | :param gail: GAIL算法类 51 | :param env: 52 | :param expert_s: 专家数据(s,a)中的s 53 | :param expert_a: 专家数据(s,a)中的a 54 | :param n_episode: 55 | :param return_agent: 56 | :return: 57 | """ 58 | return_list = [] 59 | 60 | with tqdm(total=n_episode, desc="进度条") as pbar: 61 | for i in range(n_episode): 62 | episode_return = 0 63 | state = env.reset() 64 | done = False 65 | state_list = [] 66 | action_list = [] 67 | next_state_list = [] 68 | done_list = [] 69 | 70 | while not done: 71 | action = agent.take_action(state) # 也可换成gail.agent 72 | next_state, reward, done, _ = env.step(action) 73 | state_list.append(state) 74 | action_list.append(action) 75 | next_state_list.append(next_state) 76 | done_list.append(done) 77 | episode_return += reward 78 | state = next_state 79 | return_list.append(episode_return) 80 | 81 | gail.learn(expert_s, expert_a, # 之前的那30条专家数据 82 | state_list, action_list, next_state_list, done_list) 83 | 84 | if (i + 1) % 10 == 0: 85 | pbar.set_postfix({'return': '%.3f' % np.mean(return_list[-10:])}) 86 | pbar.update(1) 87 | if return_agent: 88 | return return_list, gail.agent 89 | return return_list 90 | -------------------------------------------------------------------------------- 
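The imitation-learning utilities above (BehaviorClone with train_bc, and GAIL with train_gail) both consume a pre-collected expert dataset; GAIL additionally wraps a policy-gradient agent whose environment rewards it replaces with the discriminator signal. Below is a minimal wiring sketch, assuming the two-action CartPole demo environment from life/envs/dis_env_demo.py (the discriminator above one-hot encodes actions with num_classes=2) and the PPO agent from life/policy/ppo.py; the expert arrays, file names, and hyperparameter values are placeholders, not data shipped with the repository.

```python
# Hypothetical wiring of the imitation-learning pieces defined above.
import numpy as np
import torch

from life.envs.dis_env_demo import make
from life.imitation.bc import BehaviorClone
from life.imitation.gail import GAIL
from life.imitation.trainer import train_bc, train_gail
from life.policy.ppo import PPO

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = make()                                   # CartPole-v0: 4-dim state, 2 discrete actions
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n

# Placeholder expert data: arrays of expert states and the actions the expert took in them.
expert_s = np.load("expert_states.npy")
expert_a = np.load("expert_actions.npy")

# Behavior cloning: supervised maximum-likelihood fit to the expert (state, action) pairs.
bc_agent = BehaviorClone(state_dim, hidden_dim=128, action_dim=action_dim, lr=1e-3, device=device)
bc_returns = train_bc(bc_agent, env, expert_s, expert_a, n_iterations=500, batch_size=64)

# GAIL: the PPO agent is updated on rewards derived from the discriminator's output.
ppo_agent = PPO(state_dim, 128, action_dim, actor_lr=1e-3, critic_lr=1e-2,
                lmbda=0.95, epochs=10, eps=0.2, gamma=0.98, device=device)
gail = GAIL(ppo_agent, state_dim, action_dim, hidden_dim=128, lr_d=1e-3, device=device)
gail_returns = train_gail(ppo_agent, gail, env, expert_s, expert_a, n_episode=500)
```

In train_gail the expert arrays are reused every episode while fresh agent trajectories are collected, so the only difference from a plain PPO run is where the reward in the transition dictionary comes from.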
/life/policy/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------- /life/policy/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/__pycache__/ddpg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/ddpg.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/__pycache__/ppo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/ppo.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/__pycache__/reinforce.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/reinforce.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/__pycache__/sac.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/sac.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/__pycache__/trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/__pycache__/trainer.cpython-37.pyc -------------------------------------------------------------------------------- /life/policy/ac/a3c.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/policy/ac/a3c.py -------------------------------------------------------------------------------- /life/policy/ac/ac.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torch.nn.functional as F 4 | from ..reinforce import PolicyNet 5 | 6 | 7 | class ValueNet(nn.Module): 8 | def __init__(self, state_dim, hidden_dim): 9 | super().__init__() 10 | self.fc1 = nn.Linear(state_dim, hidden_dim) 11 | self.fc2 = nn.Linear(hidden_dim, 1) 12 | 13 | def forward(self, state): 14 | x = F.relu(self.fc1(state)) 15 | return self.fc2(x) # 注意这是一个回归问题 16 | 17 | 18 | class ActorCritic: 19 | def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr, gamma, device, 20 | policy_net=PolicyNet, value_net=ValueNet): 21 | # 定义策略网络 和 价值网络 22 | self.actor = policy_net(state_dim, hidden_dim, action_dim).to(device) 23 | self.critic = value_net(state_dim, hidden_dim).to(device) 24 | 25 | # 分别为两个网络建立优化器 26 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr) 27 | self.critic_optimizer = 
torch.optim.Adam(self.critic.parameters(), lr=critic_lr) 28 | 29 | self.gamma = gamma 30 | self.device = device 31 | 32 | def take_action(self, state): 33 | state = torch.tensor([state], dtype=torch.float).to(self.device) 34 | probs = self.actor(state) 35 | action_dist = torch.distributions.Categorical(probs) # 根据概率大小采样 36 | action = action_dist.sample() 37 | return action.item() # 输出标量 38 | 39 | def update(self, transition_dict): 40 | states = torch.tensor(transition_dict['states'], 41 | dtype=torch.float).to(self.device) 42 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 43 | self.device) 44 | rewards = torch.tensor(transition_dict['rewards'], 45 | dtype=torch.float).view(-1, 1).to(self.device) 46 | next_states = torch.tensor(transition_dict['next_states'], 47 | dtype=torch.float).to(self.device) 48 | dones = torch.tensor(transition_dict['dones'], 49 | dtype=torch.float).view(-1, 1).to(self.device) 50 | 51 | td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones) # 时序差分目标 52 | td_delta = td_target - self.critic(states) # 时序差分误差 53 | log_probs = torch.log(self.actor(states).gather(1, actions)) 54 | 55 | # 计算两个网络的loss 56 | actor_loss = torch.mean(-log_probs * td_delta.detach()) # 策略的损失函数 57 | critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach())) 58 | 59 | # 更新网络参数 60 | self.actor_optimizer.zero_grad() 61 | self.critic_optimizer.zero_grad() 62 | 63 | # 误差反向传播 64 | actor_loss.backward() 65 | critic_loss.backward() 66 | 67 | # 优化器step() 68 | self.actor_optimizer.step() 69 | self.critic_optimizer.step() 70 | -------------------------------------------------------------------------------- /life/policy/ddpg.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | 7 | class TwoLayerFC(nn.Module): 8 | def __init__(self, num_in, num_out, hidden_dim, activation=F.relu, out_fn=lambda x: x) -> None: 9 | super().__init__() 10 | self.fc1 = nn.Linear(num_in, hidden_dim) 11 | self.fc2 = nn.Linear(hidden_dim, hidden_dim) 12 | self.fc3 = nn.Linear(hidden_dim, num_out) 13 | self.activation = activation 14 | self.out_fn = out_fn 15 | 16 | def forward(self, x): 17 | x = self.activation(self.fc1(x)) 18 | x = self.activation(self.fc2(x)) 19 | x = self.out_fn(self.fc3(x)) 20 | return x 21 | 22 | 23 | class DDPG: 24 | def __init__(self, num_in_actor, num_out_actor, num_in_critic, hidden_dim, 25 | discrete, action_bound, sigma, actor_lr, critic_lr, 26 | tau, gamma, device, common_net=TwoLayerFC): 27 | """ 28 | 第一行是神经网络结构上的超参数 29 | discrete:是否用于处理离散动作 30 | action_bound:限制动作取值范围 31 | sigma:用于添加高斯噪声的高斯分布参数 32 | tau:软更新目标网络的参数 33 | gamma:衰减因子 34 | """ 35 | out_fn = (lambda x: x) if discrete else ( 36 | lambda x: torch.tanh(x) * action_bound) 37 | self.actor = common_net(num_in_actor, num_out_actor, hidden_dim, 38 | activation=F.relu, out_fn=out_fn).to(device) 39 | self.target_actor = common_net(num_in_actor, num_out_actor, hidden_dim, 40 | activation=F.relu, out_fn=out_fn).to(device) 41 | self.critic = common_net(num_in_critic, 1, hidden_dim).to(device) 42 | self.target_critic = common_net( 43 | num_in_critic, 1, hidden_dim).to(device) 44 | 45 | # 设置目标价值网络并设置和价值网络相同的参数 46 | self.target_critic.load_state_dict(self.critic.state_dict()) 47 | # 初始化目标策略网略并设置和策略相同的参数 48 | self.target_actor.load_state_dict(self.actor.state_dict()) 49 | 50 | self.actor_optimizer = torch.optim.Adam( 51 | self.actor.parameters(), lr=actor_lr) 52 | 
self.critic_optimizer = torch.optim.Adam( 53 | self.critic.parameters(), lr=critic_lr) 54 | self.gamma = gamma 55 | self.sigma = sigma # 高斯噪声的标准差,均值直接设为0 56 | self.action_bound = action_bound 57 | self.tau = tau # 目标网络软更新参数 58 | self.action_dim = num_out_actor 59 | self.device = device 60 | 61 | def take_action(self, state): 62 | """输入状态,输出带有噪声的动作""" 63 | state = torch.tensor([state], dtype=torch.float).to(self.device) 64 | action = self.actor(state).item() 65 | # 给动作添加噪声,增加探索 66 | action = action + self.gamma * np.random.randn(self.action_dim) 67 | return action 68 | 69 | def soft_update(self, net, target_net): 70 | for param_target, param in zip(target_net.parameters(), net.parameters()): 71 | param_target.data.copy_( 72 | param_target.data * (1 - self.tau) + param.data * self.tau) 73 | 74 | def update(self, transition_dict): 75 | states = torch.tensor( 76 | transition_dict['states'], dtype=torch.float).to(self.device) 77 | actions = torch.tensor( 78 | transition_dict['actions'], dtype=torch.float).view(-1, 1).to(self.device) 79 | rewards = torch.tensor( 80 | transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device) 81 | next_states = torch.tensor( 82 | transition_dict['next_states'], dtype=torch.float).to(self.device) 83 | dones = torch.tensor( 84 | transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device) 85 | 86 | # 计算critic loss 87 | next_q_values = self.target_critic(torch.cat([next_states, 88 | self.target_actor(next_states)], 89 | dim=1)) # Q_{w-} 90 | q_targets = rewards + self.gamma * next_q_values * (1 - dones) 91 | critic_loss = torch.mean(F.mse_loss( 92 | self.critic(torch.cat([states, actions], dim=1)), 93 | q_targets 94 | )) 95 | # 优化 96 | self.critic_optimizer.zero_grad() 97 | critic_loss.backward() 98 | self.critic_optimizer.step() 99 | 100 | # 计算actor loss 101 | actor_loss = - \ 102 | torch.mean(self.critic( 103 | torch.cat([states, self.actor(states)], dim=1))) 104 | # 优化 105 | self.actor_optimizer.zero_grad() 106 | actor_loss.backward() 107 | self.actor_optimizer.step() 108 | 109 | # 软更新两个两个目标网络 110 | self.soft_update(self.critic, self.target_critic) 111 | self.soft_update(self.actor, self.target_actor) 112 | -------------------------------------------------------------------------------- /life/policy/ppo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import nn 4 | import torch.nn.functional as F 5 | from ..utils.calculator import compute_advantage 6 | 7 | 8 | # from .ac.ac import PolicyNet,ValueNet 9 | 10 | 11 | class PolicyNet(nn.Module): 12 | def __init__(self, state_dim, hidden_dim, action_dim): 13 | super().__init__() 14 | self.fc1 = nn.Linear(state_dim, hidden_dim) 15 | self.fc2 = nn.Linear(hidden_dim, action_dim) 16 | 17 | def forward(self, state): 18 | x = F.relu(self.fc1(state)) 19 | return F.softmax(self.fc2(x), dim=1) 20 | 21 | 22 | class ValueNet(nn.Module): 23 | def __init__(self, state_dim, hidden_dim): 24 | super().__init__() 25 | self.fc1 = nn.Linear(state_dim, hidden_dim) 26 | self.fc2 = nn.Linear(hidden_dim, 1) 27 | 28 | def forward(self, state): 29 | x = F.relu(self.fc1(state)) 30 | return self.fc2(x) 31 | 32 | 33 | class PPO: 34 | """PPO 算法,采用截断的方式""" 35 | 36 | def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr, 37 | lmbda, epochs, eps, gamma, device, policy_net=PolicyNet, value_net=ValueNet): 38 | """ 39 | lmbda:广义优势估计的lambda因子 40 | epochs: 一条序列的数据用来训练的轮数 41 | eps: PPO中阶段范围的参数 42 | """ 43 | self.actor = 
policy_net(state_dim, hidden_dim, action_dim).to(device) 44 | self.critic = value_net(state_dim, hidden_dim).to(device) 45 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr) 46 | self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr) 47 | self.gamma = gamma 48 | self.lmbda = lmbda 49 | self.epochs = epochs 50 | self.eps = eps 51 | self.device = device 52 | 53 | def take_action(self, state): 54 | state = torch.tensor([state], dtype=torch.float).to(self.device) 55 | probs = self.actor(state) # 输出动作的概率分布 56 | action_dist = torch.distributions.Categorical(probs) 57 | action = action_dist.sample() 58 | return action.item() 59 | 60 | def update(self, transition_dict): 61 | # 数据类型转换 62 | states = torch.tensor(transition_dict['states'], 63 | dtype=torch.float).to(self.device) 64 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 65 | self.device) 66 | rewards = torch.tensor(transition_dict['rewards'], 67 | dtype=torch.float).view(-1, 1).to(self.device) 68 | next_states = torch.tensor(transition_dict['next_states'], 69 | dtype=torch.float).to(self.device) 70 | dones = torch.tensor(transition_dict['dones'], 71 | dtype=torch.float).view(-1, 1).to(self.device) 72 | 73 | td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones) # 时序差分目标 74 | td_delta = td_target - self.critic(states) # 时序差分误差 75 | 76 | advantage = compute_advantage(self.gamma, self.lmbda, td_delta.cpu()).to(self.device) 77 | 78 | old_log_probs = torch.log(self.actor(states).gather(1, actions)).detach() # 旧策略 79 | 80 | # 对于actor每采样的一组数据,更新epoch次网络 81 | for _ in range(self.epochs): 82 | log_probs = torch.log(self.actor(states).gather(1, actions)) 83 | ratio = torch.exp(log_probs - old_log_probs) # 比值 84 | 85 | surr1 = ratio * advantage 86 | surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * advantage # 对比值进行裁剪 87 | 88 | # 计算loss 89 | actor_loss = torch.mean(-torch.min(surr1, surr2)) # 对演员的loss,使用ppo目标函数 90 | critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach())) 91 | 92 | # 优化 93 | self.actor_optimizer.zero_grad() 94 | self.critic_optimizer.zero_grad() 95 | actor_loss.backward() 96 | critic_loss.backward() 97 | self.actor_optimizer.step() 98 | self.critic_optimizer.step() 99 | -------------------------------------------------------------------------------- /life/policy/reinforce.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class PolicyNet(nn.Module): 7 | def __init__(self, state_dim, hidden_dim, action_dim): 8 | super().__init__() 9 | self.fc1 = nn.Linear(state_dim, hidden_dim) 10 | self.fc2 = nn.Linear(hidden_dim, action_dim) 11 | 12 | def forward(self, state): 13 | x = F.relu(self.fc1(state)) 14 | return F.softmax(self.fc2(x), dim=1) 15 | 16 | 17 | class REINFORCE: 18 | def __init__(self, state_dim, hidden_dim, action_dim, learning_rate, gamma, device, net=PolicyNet): 19 | self.policy_net = net(state_dim, hidden_dim, action_dim).to(device=device) 20 | self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=learning_rate) 21 | self.gamma = gamma 22 | self.device = device 23 | 24 | def take_action(self, state): 25 | """根据动作概率分布随机采样""" 26 | state = torch.tensor([state], dtype=torch.float).to(self.device) 27 | probs = self.policy_net(state) # 动作概率分布 28 | action_dist = torch.distributions.Categorical(probs=probs) # 创建分类分布 29 | action = action_dist.sample() # 从创建的分布中采样 30 | return 
action.item() 31 | 32 | def update(self, transition_dict): 33 | reward_list = transition_dict['rewards'] 34 | state_list = transition_dict['states'] 35 | action_list = transition_dict['actions'] 36 | 37 | G = 0 38 | self.optimizer.zero_grad() 39 | for i in reversed(range(len(reward_list))): 40 | reward = reward_list[i] 41 | state = torch.tensor([state_list[i]], dtype=torch.float).to(self.device) 42 | action = torch.tensor([action_list[i]]).view(-1, 1).to(self.device) 43 | 44 | log_prob = torch.log(self.policy_net(state).gather(1, action)) # log \pi(a|s) 45 | G = self.gamma * G + reward 46 | loss = -log_prob * G # 每一步的损失函数 47 | loss.backward() 48 | self.optimizer.step() 49 | -------------------------------------------------------------------------------- /life/policy/sac.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import torch.nn.functional as F 4 | from torch.distributions import Normal 5 | import numpy as np 6 | 7 | 8 | class PolicyNetContinuous(nn.Module): 9 | def __init__(self, state_dim, hidden_dim, action_dim, action_bound): 10 | super(PolicyNetContinuous, self).__init__() 11 | self.fc1 = nn.Linear(state_dim, hidden_dim) 12 | self.fc_mu = nn.Linear(hidden_dim, action_dim) 13 | self.fc_std = nn.Linear(hidden_dim, action_dim) 14 | self.action_bound = action_bound 15 | 16 | def forward(self, x): 17 | x = F.relu(self.fc1(x)) 18 | mu = self.fc_mu(x) 19 | std = F.softplus(self.fc_std(x)) 20 | 21 | dist = Normal(mu, std) 22 | normal_sample = dist.rsample() # 重参数化采样 23 | log_prob = dist.log_prob(normal_sample) # log (pi) 24 | 25 | action = torch.tanh(normal_sample) 26 | log_prob = log_prob - torch.log(1 - torch.tanh(action).pow(2) + 1e-7) 27 | action = action * self.action_bound 28 | 29 | return action, log_prob 30 | 31 | 32 | class QValueNetContinuous(nn.Module): 33 | def __init__(self, state_dim, hidden_dim, action_dim) -> None: 34 | super().__init__() 35 | self.fc1 = nn.Linear(state_dim + action_dim, hidden_dim) 36 | self.fc2 = nn.Linear(hidden_dim, hidden_dim) 37 | self.fc3 = nn.Linear(hidden_dim, 1) 38 | 39 | def forward(self, x, a): 40 | """state,action""" 41 | cat = torch.cat([x, a], dim=1) 42 | x = F.relu(self.fc1(cat)) 43 | x = F.relu(self.fc2(x)) 44 | return self.fc3(x) 45 | 46 | 47 | class SACContinuous: 48 | """处理连续动作的SAC算法""" 49 | 50 | def __init__(self, state_dim, hidden_dim, action_dim, action_bound, 51 | actor_lr, critic_lr, alpha_lr, 52 | target_entropy, tau, gamma, device, 53 | actor_net=PolicyNetContinuous, critic_net=QValueNetContinuous): 54 | # 5个网络 55 | self.actor = actor_net(state_dim, hidden_dim, action_dim, 56 | action_bound).to(device) # 策略网络 57 | self.critic_1 = critic_net(state_dim, hidden_dim, 58 | action_dim).to(device) # 第一个Q网络 59 | self.critic_2 = critic_net(state_dim, hidden_dim, 60 | action_dim).to(device) # 第二个Q网络 61 | self.target_critic_1 = critic_net(state_dim, 62 | hidden_dim, action_dim).to( 63 | device) # 第一个目标Q网络 64 | self.target_critic_2 = critic_net(state_dim, 65 | hidden_dim, action_dim).to( 66 | device) # 第二个目标Q网络 67 | # 令目标价值网络的初始参数和价值网络一样 68 | self.target_critic_1.load_state_dict(self.critic_1.state_dict()) 69 | self.target_critic_2.load_state_dict(self.critic_2.state_dict()) 70 | # 优化器 71 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), 72 | lr=actor_lr) 73 | self.critic_1_optimizer = torch.optim.Adam(self.critic_1.parameters(), 74 | lr=critic_lr) 75 | self.critic_2_optimizer = torch.optim.Adam(self.critic_2.parameters(), 76 | lr=critic_lr) 
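        # The entropy temperature alpha is treated as a learnable parameter below:
        # update() nudges log_alpha so that the policy entropy tracks target_entropy;
        # a larger alpha rewards exploration, a smaller one makes the policy greedier.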
77 | # 使用alpha的Log值,可以使训练效果比较稳定 78 | self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float) 79 | self.log_alpha.requires_grad = True # 可以对alpha求梯度 80 | self.log_alpha_optimizer = torch.optim.Adam( 81 | [self.log_alpha], lr=alpha_lr) 82 | 83 | self.target_entropy = target_entropy 84 | self.gamma = gamma 85 | self.tau = tau 86 | self.device = device 87 | 88 | def take_action(self, state): 89 | state = torch.tensor([state], dtype=torch.float).to(self.device) 90 | action = self.actor(state)[0] 91 | return [action.item()] 92 | 93 | def calc_target(self, rewards, next_states, dones): 94 | """计算目标Q值""" 95 | next_actions, log_prob = self.actor(next_states) 96 | entropy = -log_prob 97 | q1_value = self.target_critic_1(next_states, next_actions) 98 | q2_value = self.target_critic_2(next_states, next_actions) 99 | next_value = torch.min(q1_value, q2_value) + self.log_alpha.exp() * entropy 100 | td_target = rewards + self.gamma * next_value * (1 - dones) 101 | return td_target 102 | 103 | def soft_update(self, net, target_net): 104 | for param_target, param in zip(target_net.parameters(), net.parameters()): 105 | param_target.data.copy_( 106 | param_target.data * (1 - self.tau) + param.data * self.tau) 107 | 108 | def update(self, transition_dict): 109 | # 数据转换 110 | states = torch.tensor(transition_dict['states'], 111 | dtype=torch.float).to(self.device) 112 | actions = torch.tensor(transition_dict['actions'], 113 | dtype=torch.float).view(-1, 1).to(self.device) 114 | rewards = torch.tensor(transition_dict['rewards'], 115 | dtype=torch.float).view(-1, 1).to(self.device) 116 | next_states = torch.tensor(transition_dict['next_states'], 117 | dtype=torch.float).to(self.device) 118 | dones = torch.tensor(transition_dict['dones'], 119 | dtype=torch.float).view(-1, 1).to(self.device) 120 | # 和之前章节一样,对倒立摆环境的奖励进行重塑以便训练 121 | rewards = (rewards + 8.0) / 8.0 122 | 123 | # 更新两个Q网络 124 | td_target = self.calc_target(rewards, next_states, dones) 125 | critic_1_loss = torch.mean(F.mse_loss(self.critic_1(states, actions), 126 | td_target.detach())) 127 | critic_2_loss = torch.mean(F.mse_loss(self.critic_2(states, actions), 128 | td_target.detach())) 129 | # 优化 130 | self.critic_1_optimizer.zero_grad() 131 | critic_1_loss.backward() 132 | self.critic_1_optimizer.step() 133 | 134 | self.critic_2_optimizer.zero_grad() 135 | critic_2_loss.backward() 136 | self.critic_2_optimizer.step() 137 | 138 | # 更新策略网络 139 | new_actions, log_prob = self.actor(states) 140 | entropy = -log_prob 141 | q1_value = self.critic_1(states, new_actions) 142 | q2_value = self.critic_2(states, new_actions) 143 | 144 | actor_loss = torch.mean(-self.log_alpha.exp() 145 | * entropy - torch.min(q1_value, q2_value)) 146 | 147 | # 优化 148 | self.actor_optimizer.zero_grad() 149 | actor_loss.backward() 150 | self.actor_optimizer.step() 151 | 152 | # 更新alpha的值 153 | alpha_loss = torch.mean( 154 | (entropy - self.target_entropy).detach() * self.log_alpha.exp()) 155 | self.log_alpha_optimizer.zero_grad() 156 | alpha_loss.backward() 157 | self.log_alpha_optimizer.step() 158 | 159 | self.soft_update(self.critic_1, self.target_critic_1) 160 | self.soft_update(self.critic_2, self.target_critic_2) 161 | 162 | 163 | class PolicyNet(nn.Module): 164 | def __init__(self, state_dim, hidden_dim, action_dim) -> None: 165 | super().__init__() 166 | self.fc1 = nn.Linear(state_dim, hidden_dim) 167 | self.fc2 = nn.Linear(hidden_dim, action_dim) 168 | 169 | def forward(self, x): 170 | x = F.relu(self.fc1(x)) 171 | return F.softmax(self.fc2(x), dim=1) 172 | 173 | 
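# Discrete-action variant below: QValueNet returns one Q-value per action and
# PolicyNet returns a softmax over actions, so the policy entropy and the soft
# state value can be computed in closed form from the action probabilities,
# without reparameterized sampling.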
174 | class QValueNet(nn.Module): 175 | def __init__(self, state_dim, hidden_dim, action_dim): 176 | super().__init__() 177 | self.fc1 = nn.Linear(state_dim, hidden_dim) 178 | self.fc2 = nn.Linear(hidden_dim, action_dim) 179 | 180 | def forward(self, x): 181 | x = F.relu(self.fc1(x)) 182 | return self.fc2(x) 183 | 184 | 185 | class SACDiscrete: 186 | """处理离散动作的SAC""" 187 | 188 | def __init__(self, state_dim, hidden_dim, action_dim, 189 | actor_lr, critic_lr, alpha_lr, 190 | target_entropy, tau, gamma, device, 191 | actor_net=PolicyNet, critic_net=QValueNet): 192 | # 策略网络 193 | self.actor = actor_net(state_dim, hidden_dim, action_dim).to(device) 194 | # 第一个Q网络 195 | self.critic_1 = critic_net(state_dim, hidden_dim, action_dim).to(device) 196 | # 第二个Q网络 197 | self.critic_2 = critic_net(state_dim, hidden_dim, action_dim).to(device) 198 | self.target_critic_1 = critic_net(state_dim, hidden_dim, 199 | action_dim).to(device) # 第一个目标Q网络 200 | self.target_critic_2 = critic_net(state_dim, hidden_dim, 201 | action_dim).to(device) # 第二个目标Q网络 202 | # 令目标Q网络的初始参数和Q网络一样 203 | self.target_critic_1.load_state_dict(self.critic_1.state_dict()) 204 | self.target_critic_2.load_state_dict(self.critic_2.state_dict()) 205 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), 206 | lr=actor_lr) 207 | self.critic_1_optimizer = torch.optim.Adam(self.critic_1.parameters(), 208 | lr=critic_lr) 209 | self.critic_2_optimizer = torch.optim.Adam(self.critic_2.parameters(), 210 | lr=critic_lr) 211 | # 使用alpha的log值,可以使训练结果比较稳定 212 | self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float) 213 | self.log_alpha.requires_grad = True # 可以对alpha求梯度 214 | self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], 215 | lr=alpha_lr) 216 | self.target_entropy = target_entropy # 目标熵的大小 217 | self.gamma = gamma 218 | self.tau = tau 219 | self.device = device 220 | 221 | def take_action(self, state): 222 | state = torch.tensor([state], dtype=torch.float).to(self.device) 223 | probs = self.actor(state) 224 | action_dist = torch.distributions.Categorical(probs) 225 | action = action_dist.sample() 226 | return action.item() 227 | 228 | def calc_target(self, rewards, next_states, dones): 229 | """计算目标Q值,直接使用策略网络的输出概率进行计算""" 230 | next_probs = self.actor(next_states) 231 | next_log_probs = torch.log(next_probs + 1e-8) 232 | entropy = -torch.sum(next_probs * next_log_probs, dim=1, keepdim=True) # 计算熵 233 | 234 | q1_value = self.target_critic_1(next_states) 235 | q2_value = self.target_critic_2(next_states) 236 | q_value = torch.min(q1_value, q2_value) # q_value 237 | min_value = torch.sum(next_probs * q_value, dim=1, keepdim=True) 238 | 239 | next_value = min_value + self.log_alpha.exp() * entropy 240 | td_target = rewards + self.gamma * next_value * (1 - dones) 241 | return td_target 242 | 243 | def soft_update(self, net, target_net): 244 | """软更新target_net""" 245 | for param_target, param in zip(target_net.parameters(), net.parameters()): 246 | param_target.data.copy_(param_target.data * (1 - self.tau) + param.data * self.tau) 247 | 248 | def update(self, transition_dict): 249 | # 数据类型转换 250 | states = torch.tensor(transition_dict['states'], 251 | dtype=torch.float).to(self.device) 252 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 253 | self.device) # 动作不再是float类型 254 | rewards = torch.tensor(transition_dict['rewards'], 255 | dtype=torch.float).view(-1, 1).to(self.device) 256 | next_states = torch.tensor(transition_dict['next_states'], 257 | dtype=torch.float).to(self.device) 258 | dones = 
torch.tensor(transition_dict['dones'], 259 | dtype=torch.float).view(-1, 1).to(self.device) 260 | 261 | # 更新两个Q网络 262 | td_target = self.calc_target(rewards, next_states, dones) 263 | critic1_q_values = self.critic_1(states).gather(1, actions) 264 | critic1_loss = torch.mean(F.mse_loss(critic1_q_values, td_target.detach())) 265 | 266 | critic2_q_values = self.critic_2(states).gather(1, actions) 267 | critic2_loss = torch.mean(F.mse_loss(critic2_q_values, td_target.detach())) 268 | 269 | # 优化 270 | self.critic_1_optimizer.zero_grad() 271 | critic1_loss.backward() 272 | self.critic_1_optimizer.step() 273 | 274 | self.critic_2_optimizer.zero_grad() 275 | critic2_loss.backward() 276 | self.critic_2_optimizer.step() 277 | 278 | # 更新策略网络 279 | probs = self.actor(states) 280 | log_probs = torch.log(probs + 1e-8) 281 | 282 | # 直接根据概率计算熵 283 | entropy = -torch.sum(probs * log_probs, dim=1, keepdim=True) 284 | q1_value = self.critic_1(states) 285 | q2_value = self.critic_2(states) 286 | q_value = torch.min(q1_value, q2_value) 287 | min_qvalue = torch.sum(probs * q_value, dim=1, keepdim=True) 288 | # actor_loss 289 | actor_loss = torch.mean(-self.log_alpha.exp() * entropy - min_qvalue) 290 | # 优化 291 | self.actor_optimizer.zero_grad() 292 | actor_loss.backward() 293 | self.actor_optimizer.step() 294 | 295 | # 更新alpha的值 296 | alpha_loss = torch.mean((entropy - target_entropy).detach() * self.log_alpha.exp()) 297 | self.log_alpha_optimizer.zero_grad() 298 | alpha_loss.backward() 299 | self.log_alpha_optimizer.step() 300 | 301 | self.soft_update(self.critic_1, self.target_critic_1) 302 | self.soft_update(self.critic_2, self.target_critic_2) 303 | -------------------------------------------------------------------------------- /life/policy/trainer.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | import numpy as np 3 | 4 | 5 | def train_reinforce(agent, env, num_episodes, return_agent=False): 6 | """REINFORCE算法的训练函数""" 7 | return_list = [] 8 | for i in range(10): 9 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 10 | for i_episode in range(int(num_episodes / 10)): 11 | episode_return = 0 12 | transition_dict = { 13 | 'states': [], 14 | 'actions': [], 15 | 'next_states': [], 16 | 'rewards': [], 17 | 'dones': [] 18 | } 19 | state = env.reset() 20 | done = False 21 | while not done: 22 | action = agent.take_action(state) 23 | next_state, reward, done, _ = env.step(action) 24 | transition_dict['states'].append(state) 25 | transition_dict['actions'].append(action) 26 | transition_dict['next_states'].append(next_state) 27 | transition_dict['rewards'].append(reward) 28 | transition_dict['dones'].append(done) 29 | state = next_state 30 | episode_return += reward 31 | return_list.append(episode_return) 32 | agent.update(transition_dict) 33 | if (i_episode + 1) % 10 == 0: 34 | pbar.set_postfix({ 35 | 'episode': 36 | '%d' % (num_episodes / 10 * i + i_episode + 1), 37 | 'return': 38 | '%.3f' % np.mean(return_list[-10:]) 39 | }) 40 | pbar.update(1) 41 | if return_agent: 42 | return return_list, agent 43 | return return_list 44 | 45 | 46 | def train_ac(agent, env, num_episodes, return_agent=False): 47 | """ac算法的训练函数""" 48 | out = train_reinforce(agent, env, num_episodes, return_agent=return_agent) 49 | return out 50 | 51 | 52 | def train_ppo(agent, env, num_episodes, return_agent=False): 53 | """ppo算法的训练函数""" 54 | out = train_reinforce(agent, env, num_episodes, return_agent=return_agent) 55 | return out 56 | 57 | 58 | # 
################################################# 59 | # 深度确定性策略梯度属于off policy 60 | def train_ddpg(env, agent, num_episodes, replay_buffer, minimal_size, batch_size, return_agent=False): 61 | """DDPG算法的训练函数""" 62 | return_list = [] 63 | for i in range(10): 64 | with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar: 65 | for i_episode in range(int(num_episodes / 10)): 66 | episode_return = 0 67 | state = env.reset() 68 | done = False 69 | while not done: 70 | action = agent.take_action(state) 71 | next_state, reward, done, _ = env.step(action) 72 | replay_buffer.add(state, action, reward, next_state, done) 73 | state = next_state 74 | episode_return += reward 75 | if replay_buffer.size() > minimal_size: 76 | b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size) 77 | transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r, 78 | 'dones': b_d} 79 | agent.update(transition_dict) 80 | return_list.append(episode_return) 81 | if (i_episode + 1) % 10 == 0: 82 | pbar.set_postfix({'episode': '%d' % (num_episodes / 10 * i + i_episode + 1), 83 | 'return': '%.3f' % np.mean(return_list[-10:])}) 84 | pbar.update(1) 85 | if return_agent: 86 | return return_list, agent 87 | return return_list 88 | 89 | 90 | def train_sac(env, agent, num_episodes, replay_buffer, minimal_size, batch_size, return_agent=False): 91 | """训练sac算法的函数""" 92 | out = train_ddpg(env, agent, num_episodes, replay_buffer, minimal_size, batch_size, return_agent=return_agent) 93 | return out 94 | -------------------------------------------------------------------------------- /life/test/test_dqn.py: -------------------------------------------------------------------------------- 1 | from life.dqn.dqn_improved import DuelingDQN 2 | from life.dqn.trainer import train 3 | from life.utils.replay.replay_buffer import ReplayBuffer 4 | from life.envs.con_env_demo import make 5 | import gym 6 | import torch 7 | 8 | lr = 2e-3 9 | num_episodes = 500 10 | hidden_dim = 128 11 | gamma = 0.98 12 | epsilon = 0.01 13 | target_update = 10 14 | buffer_size = 10000 15 | minimal_size = 500 16 | batch_size = 64 17 | device = torch.device("cpu") 18 | env = make() 19 | state_dim = env.observation_space.shape[0] 20 | action_dim = 11 21 | agent = DuelingDQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, 22 | target_update, device) 23 | replay_buffer = ReplayBuffer(buffer_size) 24 | result = train(agent, env, replay_buffer, minimal_size, batch_size, con_act=True) 25 | print(result) 26 | -------------------------------------------------------------------------------- /life/test/test_off_policy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from life.policy.sac import SACContinuous 3 | from life.policy.trainer import train_sac 4 | from life.envs.con_env_demo import make 5 | from life.utils.replay.replay_buffer import ReplayBuffer 6 | 7 | env = make() 8 | state_dim = env.observation_space.shape[0] 9 | action_dim = env.action_space.shape[0] 10 | action_bound = env.action_space.high[0] # 动作最大值 11 | 12 | actor_lr = 3e-4 13 | critic_lr = 3e-3 14 | alpha_lr = 3e-4 15 | num_episodes = 100 16 | hidden_dim = 128 17 | gamma = 0.99 18 | tau = 0.005 # 软更新参数 19 | buffer_size = 100000 20 | minimal_size = 1000 21 | batch_size = 64 22 | target_entropy = -env.action_space.shape[0] 23 | device = torch.device("cpu") 24 | 25 | replay_buffer = ReplayBuffer(buffer_size) 26 | agent = SACContinuous(state_dim, hidden_dim, action_dim, action_bound, 27 | actor_lr, critic_lr, 
alpha_lr, target_entropy, tau, 28 | gamma, device) 29 | 30 | result = train_sac(env, agent, num_episodes, replay_buffer, 31 | minimal_size, batch_size) 32 | -------------------------------------------------------------------------------- /life/test/test_on_policy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from life.policy.ppo import PPO 3 | from life.policy.trainer import train_ppo 4 | from life.envs.dis_env_demo import make 5 | 6 | actor_lr = 1e-3 7 | critic_lr = 1e-2 8 | num_episodes = 500 9 | hidden_dim = 128 10 | gamma = 0.98 11 | lmbda = 0.95 12 | epochs = 10 13 | eps = 0.2 14 | device = torch.device("cpu") 15 | 16 | env = make() 17 | env.seed(0) 18 | torch.manual_seed(0) 19 | state_dim = env.observation_space.shape[0] 20 | action_dim = env.action_space.n 21 | agent = PPO(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, lmbda, 22 | epochs, eps, gamma, device) 23 | result = train_ppo(agent, env, num_episodes) 24 | -------------------------------------------------------------------------------- /life/test/test_ql.py: -------------------------------------------------------------------------------- 1 | from life.base.q_learning import QLearning 2 | from life.base.trainer import train_qlearning 3 | from life.envs.cliffwalking import CliffWalkingEnv 4 | 5 | agent = QLearning(12 * 4, 0.1, 0.1, 0.9) 6 | env = CliffWalkingEnv(12, 4) 7 | result = train_qlearning(env, agent, ) 8 | 9 | print(result) 10 | -------------------------------------------------------------------------------- /life/utils/__pycache__/calculator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/utils/__pycache__/calculator.cpython-37.pyc -------------------------------------------------------------------------------- /life/utils/__pycache__/cont2disp.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/utils/__pycache__/cont2disp.cpython-37.pyc -------------------------------------------------------------------------------- /life/utils/calculator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | 5 | def compute_advantage(gamma, lmbda, td_delta): 6 | """计算优势函数""" 7 | td_delta = td_delta.detach().numpy() 8 | advantage_list = [] 9 | advantage = 0.0 10 | for delta in td_delta[::-1]: 11 | advantage = gamma * lmbda * advantage + delta 12 | advantage_list.append(advantage) # 妙啊,边累计计算advantage,边加入列表 以保存每一个时间步的advantage. 
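    # Generalized Advantage Estimation: A_t = sum_l (gamma * lambda)^l * delta_{t+l}.
    # The deltas were traversed back to front, so the list is reversed afterwards
    # to restore chronological order.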
13 | advantage_list.reverse() 14 | return torch.tensor(advantage_list, dtype=torch.float) 15 | 16 | 17 | def sample_expert_data(env, agent, n_episodes): 18 | """ 19 | 生成专家的与环境交互的轨迹数据 20 | env:专家所在的环境 21 | agent:专家智能体 22 | n_episode:轨迹个数 23 | """ 24 | states = [] 25 | actions = [] 26 | for episode in range(n_episodes): 27 | state = env.reset() 28 | done = False 29 | 30 | while not done: 31 | action = agent.take_action(state) 32 | states.append(state) 33 | actions.append(action) 34 | 35 | next_state, reward, done, _ = env.step(action) 36 | state = next_state 37 | return np.array(states), np.array(actions) 38 | 39 | 40 | def moving_average(a, window_size): 41 | """数据平滑处理""" 42 | cumulative_sum = np.cumsum(np.insert(a, 0, 0)) 43 | middle = (cumulative_sum[window_size:] - cumulative_sum[:-window_size]) / window_size 44 | r = np.arange(1, window_size-1, 2) 45 | begin = np.cumsum(a[:window_size-1])[::2] / r 46 | end = (np.cumsum(a[:-window_size:-1])[::2] / r)[::-1] 47 | return np.concatenate((begin, middle, end)) -------------------------------------------------------------------------------- /life/utils/cont2disp.py: -------------------------------------------------------------------------------- 1 | def dis2con(discrete_action, env, action_dim): 2 | """离散动作 转为 连续动作的函数(将[0,1,2,..,10]映射到[-2,-1.6,...,1.6,2])""" 3 | action_low = env.action_space.low[0] # 连续动作的最小值 4 | action_up = env.action_space.high[0] # 连续动作的最大值 5 | out = action_low + (discrete_action / (action_dim - 1)) * (action_up - action_low) 6 | return out 7 | -------------------------------------------------------------------------------- /life/utils/replay/__pycache__/replay_buffer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanggeAi/Life/10c4d37fb1112ac017ca1239f85e7874cb51aa32/life/utils/replay/__pycache__/replay_buffer.cpython-37.pyc -------------------------------------------------------------------------------- /life/utils/replay/per_replay_buffer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | 4 | 5 | class SumTree: 6 | def __init__(self, capacity: int): 7 | self.capacity = capacity # 叶子节点个数 8 | self.data_pointer = 0 9 | self.n_entries = 0 10 | self.tree = np.zeros(2 * capacity - 1) # 树中总的节点个数 11 | self.data = np.zeros(capacity, dtype=object) 12 | 13 | def update(self, tree_idx, p): 14 | """Update the sampling weight 15 | """ 16 | change = p - self.tree[tree_idx] 17 | self.tree[tree_idx] = p 18 | 19 | while tree_idx != 0: 20 | tree_idx = (tree_idx - 1) // 2 21 | self.tree[tree_idx] += change 22 | 23 | def add(self, p, data): 24 | """Adding new data to the sumTree 25 | """ 26 | tree_idx = self.data_pointer + self.capacity - 1 27 | self.data[self.data_pointer] = data 28 | # print ("tree_idx=", tree_idx) 29 | # print ("nonzero = ", np.count_nonzero(self.tree)) 30 | self.update(tree_idx, p) 31 | 32 | self.data_pointer += 1 33 | if self.data_pointer >= self.capacity: 34 | self.data_pointer = 0 35 | 36 | if self.n_entries < self.capacity: 37 | self.n_entries += 1 38 | 39 | def get_leaf(self, v): 40 | """Sampling the data 41 | """ 42 | parent_idx = 0 43 | while True: 44 | cl_idx = 2 * parent_idx + 1 45 | cr_idx = cl_idx + 1 46 | if cl_idx >= len(self.tree): 47 | leaf_idx = parent_idx 48 | break 49 | else: 50 | if v <= self.tree[cl_idx]: 51 | parent_idx = cl_idx 52 | else: 53 | v -= self.tree[cl_idx] 54 | parent_idx = cr_idx 55 | 56 | data_idx = leaf_idx - self.capacity + 1 
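        # Leaves occupy tree[capacity - 1 : 2 * capacity - 1], so subtracting
        # (capacity - 1) maps the leaf index back to its slot in self.data.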
57 | return leaf_idx, self.tree[leaf_idx], self.data[data_idx] 58 | 59 | def total(self): 60 | return int(self.tree[0]) 61 | 62 | 63 | class ReplayTree: 64 | """ReplayTree for the per(Prioritized Experience Replay) DQN. 65 | """ 66 | 67 | def __init__(self, capacity): 68 | self.capacity = capacity # the capacity for memory replay 69 | self.tree = SumTree(capacity) 70 | self.abs_err_upper = 1. 71 | 72 | self.beta_increment_per_sampling = 0.001 73 | self.alpha = 0.6 74 | self.beta = 0.4 75 | self.epsilon = 0.01 76 | self.abs_err_upper = 1. 77 | 78 | def __len__(self): 79 | """ return the num of storage 80 | """ 81 | return self.tree.total() 82 | 83 | def push(self, error, sample): 84 | """Push the sample into the replay according to the importance sampling weight 85 | """ 86 | p = (np.abs(error) + self.epsilon) ** self.alpha 87 | self.tree.add(p, sample) 88 | 89 | def sample(self, batch_size): 90 | """This is for sampling a batch data and the original code is from: 91 | https://github.com/rlcode/per/blob/master/prioritized_memory.py 92 | """ 93 | pri_segment = self.tree.total() / batch_size 94 | 95 | priorities = [] 96 | batch = [] 97 | idxs = [] 98 | 99 | self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) 100 | 101 | for i in range(batch_size): 102 | a = pri_segment * i 103 | b = pri_segment * (i + 1) 104 | 105 | s = random.uniform(a, b) 106 | idx, p, data = self.tree.get_leaf(s) 107 | 108 | priorities.append(p) 109 | batch.append(data) 110 | idxs.append(idx) 111 | 112 | sampling_probabilities = np.array(priorities) / self.tree.total() 113 | is_weights = np.power(self.tree.n_entries * sampling_probabilities, -self.beta) 114 | is_weights /= is_weights.max() 115 | 116 | return zip(*batch), idxs, is_weights 117 | 118 | def batch_update(self, tree_idx, abs_errors): 119 | """Update the importance sampling weight 120 | """ 121 | abs_errors += self.epsilon 122 | 123 | clipped_errors = np.minimum(abs_errors, self.abs_err_upper) 124 | ps = np.power(clipped_errors, self.alpha) 125 | 126 | for ti, p in zip(tree_idx, ps): 127 | self.tree.update(ti, p) 128 | -------------------------------------------------------------------------------- /life/utils/replay/replay_buffer.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import random 3 | import numpy as np 4 | 5 | 6 | class ReplayBuffer: 7 | ''' 经验回放池 ''' 8 | 9 | def __init__(self, capacity): 10 | self.buffer = collections.deque(maxlen=capacity) # 队列,先进先出 11 | 12 | def add(self, state, action, reward, next_state, done): # 将数据加入buffer 13 | self.buffer.append((state, action, reward, next_state, done)) 14 | 15 | def sample(self, batch_size): # 从buffer中采样数据,数量为batch_size 16 | transitions = random.sample(self.buffer, batch_size) 17 | state, action, reward, next_state, done = zip(*transitions) 18 | return np.array(state), action, reward, np.array(next_state), done 19 | 20 | def size(self): # 目前buffer中数据的数量 21 | return len(self.buffer) 22 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # This is a sample Python script. 2 | 3 | # Press Shift+F10 to execute it or replace it with your code. 4 | # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings. 5 | 6 | 7 | def print_hi(name): 8 | # Use a breakpoint in the code line below to debug your script. 
9 | print(f'Hello , {name}') # Press Ctrl+F8 to toggle the breakpoint. 10 | 11 | 12 | # Press the green button in the gutter to run the script. 13 | if __name__ == '__main__': 14 | print_hi('Life !') 15 | 16 | # See PyCharm help at https://www.jetbrains.com/help/pycharm/ 17 | --------------------------------------------------------------------------------
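The test scripts above exercise DQN, SAC, PPO and Q-Learning, but not DDPG. The sketch below is a minimal, unofficial example of how the pieces in `life/policy/ddpg.py` and `life/policy/trainer.py` could be wired together; it assumes that `life.envs.con_env_demo.make` returns a Pendulum-like gym environment with a Box action space (as `test_off_policy.py` suggests), and all hyperparameter values are illustrative only.

```python
# Hypothetical DDPG training sketch; mirrors the style of life/test/test_off_policy.py.
import torch
from life.policy.ddpg import DDPG
from life.policy.trainer import train_ddpg
from life.envs.con_env_demo import make
from life.utils.replay.replay_buffer import ReplayBuffer

env = make()                                    # assumed Pendulum-like continuous env
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high[0]         # scales the tanh output of the actor

agent = DDPG(num_in_actor=state_dim, num_out_actor=action_dim,
             num_in_critic=state_dim + action_dim,   # the critic sees cat([state, action])
             hidden_dim=64, discrete=False, action_bound=action_bound,
             sigma=0.01, actor_lr=3e-4, critic_lr=3e-3,
             tau=0.005, gamma=0.98, device=torch.device("cpu"))

replay_buffer = ReplayBuffer(10000)
returns = train_ddpg(env, agent, num_episodes=200, replay_buffer=replay_buffer,
                     minimal_size=1000, batch_size=64)
```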
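Likewise, a minimal REINFORCE run on the discrete demo environment might look like the following, in the same spirit as `life/test/test_on_policy.py`; the environment is assumed to expose a discrete gym action space and the numbers are placeholders.

```python
# Hypothetical REINFORCE sketch on the discrete demo environment.
import torch
from life.policy.reinforce import REINFORCE
from life.policy.trainer import train_reinforce
from life.envs.dis_env_demo import make

env = make()                                    # assumed CartPole-like discrete env
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = REINFORCE(state_dim, 128, action_dim, learning_rate=1e-3,
                  gamma=0.98, device=torch.device("cpu"))
returns = train_reinforce(agent, env, num_episodes=500)
```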
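The prioritized replay structures in `life/utils/replay/per_replay_buffer.py` are not used by any test above. The standalone sketch below shows one plausible push/sample/update cycle, with made-up TD errors standing in for those a PER-style DQN learner would supply; it relies only on the `push`, `sample` and `batch_update` methods defined in that file.

```python
# Hypothetical exercise of the prioritized replay tree with fabricated TD errors.
import numpy as np
from life.utils.replay.per_replay_buffer import ReplayTree

buffer = ReplayTree(capacity=8)
for step in range(8):
    transition = (step, step % 2, 1.0, step + 1, False)   # (s, a, r, s', done)
    td_error = 0.5 + np.random.rand()                     # priority is derived from |TD error|
    buffer.push(td_error, transition)

batch, idxs, is_weights = buffer.sample(batch_size=4)
states, actions, rewards, next_states, dones = batch      # zip(*batch) yields five tuples
# after the learner recomputes TD errors, refresh the priorities of the sampled leaves:
buffer.batch_update(idxs, np.abs(np.random.randn(4)))
```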